[llvm] 6858a92 - [RISCV] Support vector type strict_[su]int_to_fp and strict_fp_to_[su]int.
Yeting Kuo via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 5 19:09:52 PDT 2023
Author: Yeting Kuo
Date: 2023-04-06T10:09:44+08:00
New Revision: 6858a920b86ace2ea529bfd544cd96bbff612704
URL: https://github.com/llvm/llvm-project/commit/6858a920b86ace2ea529bfd544cd96bbff612704
DIFF: https://github.com/llvm/llvm-project/commit/6858a920b86ace2ea529bfd544cd96bbff612704.diff
LOG: [RISCV] Support vector type strict_[su]int_to_fp and strict_fp_to_[su]int.
The patch also loosens the fixed-vector constraint in llvm/lib/IR/Verifier.cpp.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D147380
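With this patch the RISCV target custom-lowers constrained conversion
intrinsics on vector types to RVV instructions. A minimal illustration
(function and value names are made up; the same-width lowering matches the
vfptosi_v1f32_v1i32 test added below):

  declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
  define <4 x i32> @example(<4 x float> %v) {
    ; Same-width f32 -> i32 converts select to a single vfcvt.rtz.x.f.v.
    %r = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %v, metadata !"fpexcept.strict")
    ret <4 x i32> %r
  }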
Added:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll
Modified:
llvm/lib/IR/Verifier.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.h
llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
Removed:
################################################################################
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 90dd6efdf54a5..28234af2488fd 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6072,20 +6072,20 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
case Intrinsic::experimental_constrained_fptosi:
case Intrinsic::experimental_constrained_fptoui: {
Value *Operand = FPI.getArgOperand(0);
- uint64_t NumSrcElem = 0;
+ ElementCount SrcEC;
Check(Operand->getType()->isFPOrFPVectorTy(),
"Intrinsic first argument must be floating point", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
+ SrcEC = cast<VectorType>(OperandT)->getElementCount();
}
Operand = &FPI;
- Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+ Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
"Intrinsic first argument and result disagree on vector use", &FPI);
Check(Operand->getType()->isIntOrIntVectorTy(),
"Intrinsic result must be an integer", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
+ Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
"Intrinsic first argument and result vector lengths must be equal",
&FPI);
}
@@ -6095,20 +6095,20 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
case Intrinsic::experimental_constrained_sitofp:
case Intrinsic::experimental_constrained_uitofp: {
Value *Operand = FPI.getArgOperand(0);
- uint64_t NumSrcElem = 0;
+ ElementCount SrcEC;
Check(Operand->getType()->isIntOrIntVectorTy(),
"Intrinsic first argument must be integer", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
+ SrcEC = cast<VectorType>(OperandT)->getElementCount();
}
Operand = &FPI;
- Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+ Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
"Intrinsic first argument and result disagree on vector use", &FPI);
Check(Operand->getType()->isFPOrFPVectorTy(),
"Intrinsic result must be a floating point", &FPI);
if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
- Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
+ Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
"Intrinsic first argument and result vector lengths must be equal",
&FPI);
}
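Replacing the FixedVectorType casts with ElementCount lets the same
argument/result checks apply to scalable vectors, so declarations like the
following (mirroring the scalable-vector tests added in this patch) now pass
the verifier:

  declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float>, metadata)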
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8e166d28992a0..89d7eef4bfd07 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -602,9 +602,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// element type sizes are within one power-of-two of each other. Any
// wider distances between type sizes have to be lowered as sequences
// which progressively narrow the gap in stages.
- setOperationAction(
- {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
- VT, Custom);
+ setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
+ ISD::FP_TO_UINT, ISD::STRICT_SINT_TO_FP,
+ ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_TO_SINT,
+ ISD::STRICT_FP_TO_UINT},
+ VT, Custom);
setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
Custom);
@@ -666,9 +668,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// element type sizes are within one power-of-two of each other. Any
// wider distances between type sizes have to be lowered as sequences
// which progressively narrow the gap in stages.
- setOperationAction(
- {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
- VT, Custom);
+ setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
+ ISD::FP_TO_UINT, ISD::STRICT_SINT_TO_FP,
+ ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_TO_SINT,
+ ISD::STRICT_FP_TO_UINT},
+ VT, Custom);
setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
Custom);
@@ -891,9 +895,18 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
{ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
Custom);
- setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
- ISD::FP_TO_UINT},
- VT, Custom);
+ setOperationAction(
+ {
+ ISD::SINT_TO_FP,
+ ISD::UINT_TO_FP,
+ ISD::FP_TO_SINT,
+ ISD::FP_TO_UINT,
+ ISD::STRICT_SINT_TO_FP,
+ ISD::STRICT_UINT_TO_FP,
+ ISD::STRICT_FP_TO_SINT,
+ ISD::STRICT_FP_TO_UINT,
+ },
+ VT, Custom);
setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
Custom);
@@ -4155,7 +4168,11 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP: {
+ case ISD::UINT_TO_FP:
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::STRICT_FP_TO_UINT:
+ case ISD::STRICT_SINT_TO_FP:
+ case ISD::STRICT_UINT_TO_FP: {
// RVV can only do fp<->int conversions to types half/double the size as
// the source. We custom-lower any conversions that do two hops into
// sequences.
@@ -4163,7 +4180,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
if (!VT.isVector())
return Op;
SDLoc DL(Op);
- SDValue Src = Op.getOperand(0);
+ bool IsStrict = Op->isStrictFPOpcode();
+ SDValue Src = Op.getOperand(0 + IsStrict);
MVT EltVT = VT.getVectorElementType();
MVT SrcVT = Src.getSimpleValueType();
MVT SrcEltVT = SrcVT.getVectorElementType();
@@ -4179,10 +4197,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
// Do a regular integer sign/zero extension then convert to float.
MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize / 2),
VT.getVectorElementCount());
- unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
+ unsigned ExtOpcode = (Op.getOpcode() == ISD::UINT_TO_FP ||
+ Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
? ISD::ZERO_EXTEND
: ISD::SIGN_EXTEND;
SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
+ if (IsStrict)
+ return DAG.getNode(Op.getOpcode(), DL, Op->getVTList(),
+ Op.getOperand(0), Ext);
return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
}
// FP2Int
@@ -4190,6 +4212,11 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
// Do one doubling fp_extend then complete the operation by converting
// to int.
MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ if (IsStrict) {
+ auto [FExt, Chain] =
+ DAG.getStrictFPExtendOrRound(Src, Op.getOperand(0), DL, InterimFVT);
+ return DAG.getNode(Op.getOpcode(), DL, Op->getVTList(), Chain, FExt);
+ }
SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
}
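In the strict variant the incoming chain is threaded through the fp_extend
and then into the conversion. For example, f16 -> i64 is two width-doublings
apart, so it becomes a strict fp_extend to f32 followed by a widening convert
(see the vfptosi_v1f16_v1i64 test below):

  %r = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
  ; selects to: vfwcvt.f.f.v (f16 -> f32), then vfwcvt.rtz.x.f.v (f32 -> i64)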
@@ -4200,6 +4227,13 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
// One narrowing int_to_fp, then an fp_round.
assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ if (IsStrict) {
+ SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL,
+ DAG.getVTList(InterimFVT, MVT::Other),
+ Op.getOperand(0), Src);
+ SDValue Chain = Int2FP.getValue(1);
+ return DAG.getStrictFPExtendOrRound(Int2FP, Chain, DL, VT).first;
+ }
SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
return DAG.getFPExtendOrRound(Int2FP, DL, VT);
}
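The mirror-image int -> fp case keeps the chain as well: the strict int_to_fp
produces the interim f32 vector plus its chain, and getStrictFPExtendOrRound
consumes that chain to round down to f16. An illustrative input this path
handles (an i64 source is two size steps above f16):

  %r = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")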
@@ -4208,6 +4242,13 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
// representable by the integer, the result is poison.
MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
VT.getVectorElementCount());
+ if (IsStrict) {
+ SDValue FP2Int =
+ DAG.getNode(Op.getOpcode(), DL, DAG.getVTList(IVecVT, MVT::Other),
+ Op.getOperand(0), Src);
+ SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
+ return DAG.getMergeValues({Res, FP2Int.getValue(1)}, DL);
+ }
SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
}
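Narrow integer results take one strict convert to the half-width integer
type, then a plain truncate, with the convert's chain returned alongside the
value via getMergeValues. The added vfptosi_v1f32_v1i8 test below shows the
resulting code:

  %r = call <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f32(<1 x float> %v, metadata !"fpexcept.strict")
  ; selects to: vfncvt.rtz.x.f.w (f32 -> i16), then vnsrl.wi (truncate i16 -> i8)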
@@ -4234,6 +4275,18 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::UINT_TO_FP:
RVVOpc = RISCVISD::UINT_TO_FP_VL;
break;
+ case ISD::STRICT_FP_TO_SINT:
+ RVVOpc = RISCVISD::STRICT_VFCVT_RTZ_X_F_VL;
+ break;
+ case ISD::STRICT_FP_TO_UINT:
+ RVVOpc = RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL;
+ break;
+ case ISD::STRICT_SINT_TO_FP:
+ RVVOpc = RISCVISD::STRICT_SINT_TO_FP_VL;
+ break;
+ case ISD::STRICT_UINT_TO_FP:
+ RVVOpc = RISCVISD::STRICT_UINT_TO_FP_VL;
+ break;
}
MVT ContainerVT = getContainerForFixedLengthVector(VT);
@@ -4244,6 +4297,12 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
+ if (IsStrict) {
+ Src = DAG.getNode(RVVOpc, DL, DAG.getVTList(ContainerVT, MVT::Other),
+ Op.getOperand(0), Src, Mask, VL);
+ SDValue SubVec = convertFromScalableVector(VT, Src, DAG, Subtarget);
+ return DAG.getMergeValues({SubVec, Src.getValue(1)}, DL);
+ }
Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
return convertFromScalableVector(VT, Src, DAG, Subtarget);
}
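For fixed-length vectors the operand is first wrapped into an RVV container
type; in the strict case the RVV node produces both the converted container
and a chain, which are split back apart with convertFromScalableVector and
getMergeValues. E.g. vfptosi_v2f16_v2i16 below becomes a single
vsetivli/vfcvt.rtz.x.f.v pair:

  %r = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half> %v, metadata !"fpexcept.strict")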
@@ -14139,6 +14198,10 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(STRICT_FP_ROUND_VL)
NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
NODE_NAME_CASE(STRICT_VFNCVT_ROD_VL)
+ NODE_NAME_CASE(STRICT_SINT_TO_FP_VL)
+ NODE_NAME_CASE(STRICT_UINT_TO_FP_VL)
+ NODE_NAME_CASE(STRICT_VFCVT_RTZ_X_F_VL)
+ NODE_NAME_CASE(STRICT_VFCVT_RTZ_XU_F_VL)
NODE_NAME_CASE(VWMUL_VL)
NODE_NAME_CASE(VWMULU_VL)
NODE_NAME_CASE(VWMULSU_VL)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index ecef5aa7786ef..90ea5f0b9ea19 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -342,6 +342,10 @@ enum NodeType : unsigned {
STRICT_FP_ROUND_VL,
STRICT_FP_EXTEND_VL,
STRICT_VFNCVT_ROD_VL,
+ STRICT_SINT_TO_FP_VL,
+ STRICT_UINT_TO_FP_VL,
+ STRICT_VFCVT_RTZ_X_F_VL,
+ STRICT_VFCVT_RTZ_XU_F_VL,
// WARNING: Do not add anything in the end unless you want the node to
// have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 816a3dce5fb7f..70ccdbf6e814d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -373,7 +373,8 @@ multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
}
}
-multiclass VPatConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
+multiclass VPatConvertI2FPSDNode_V<SDPatternOperator vop,
+ string instruction_name> {
foreach fvti = AllFloatVectors in {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
@@ -382,7 +383,8 @@ multiclass VPatConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
}
}
-multiclass VPatConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
+multiclass VPatConvertFP2ISDNode_V<SDPatternOperator vop,
+ string instruction_name> {
foreach fvti = AllFloatVectors in {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
@@ -391,7 +393,8 @@ multiclass VPatConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
}
}
-multiclass VPatWConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
+multiclass VPatWConvertI2FPSDNode_V<SDPatternOperator vop,
+ string instruction_name> {
foreach vtiToWti = AllWidenableIntToFloatVectors in {
defvar ivti = vtiToWti.Vti;
defvar fwti = vtiToWti.Wti;
@@ -401,7 +404,8 @@ multiclass VPatWConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
}
}
-multiclass VPatWConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
+multiclass VPatWConvertFP2ISDNode_V<SDPatternOperator vop,
+ string instruction_name> {
foreach fvtiToFWti = AllWidenableFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
@@ -411,7 +415,8 @@ multiclass VPatWConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
}
}
-multiclass VPatNConvertI2FPSDNode_W<SDNode vop, string instruction_name> {
+multiclass VPatNConvertI2FPSDNode_W<SDPatternOperator vop,
+ string instruction_name> {
foreach fvtiToFWti = AllWidenableFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
@@ -421,7 +426,8 @@ multiclass VPatNConvertI2FPSDNode_W<SDNode vop, string instruction_name> {
}
}
-multiclass VPatNConvertFP2ISDNode_W<SDNode vop, string instruction_name> {
+multiclass VPatNConvertFP2ISDNode_W<SDPatternOperator vop,
+ string instruction_name> {
foreach vtiToWti = AllWidenableIntToFloatVectors in {
defvar vti = vtiToWti.Vti;
defvar fwti = vtiToWti.Wti;
@@ -1080,22 +1086,22 @@ foreach fvti = AllFloatVectors in {
}
// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
-defm : VPatConvertFP2ISDNode_V<fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
-defm : VPatConvertFP2ISDNode_V<fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
-defm : VPatConvertI2FPSDNode_V<sint_to_fp, "PseudoVFCVT_F_X_V">;
-defm : VPatConvertI2FPSDNode_V<uint_to_fp, "PseudoVFCVT_F_XU_V">;
+defm : VPatConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
+defm : VPatConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
+defm : VPatConvertI2FPSDNode_V<any_sint_to_fp, "PseudoVFCVT_F_X_V">;
+defm : VPatConvertI2FPSDNode_V<any_uint_to_fp, "PseudoVFCVT_F_XU_V">;
// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
-defm : VPatWConvertFP2ISDNode_V<fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
-defm : VPatWConvertFP2ISDNode_V<fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
-defm : VPatWConvertI2FPSDNode_V<sint_to_fp, "PseudoVFWCVT_F_X_V">;
-defm : VPatWConvertI2FPSDNode_V<uint_to_fp, "PseudoVFWCVT_F_XU_V">;
+defm : VPatWConvertFP2ISDNode_V<any_fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
+defm : VPatWConvertFP2ISDNode_V<any_fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
+defm : VPatWConvertI2FPSDNode_V<any_sint_to_fp, "PseudoVFWCVT_F_X_V">;
+defm : VPatWConvertI2FPSDNode_V<any_uint_to_fp, "PseudoVFWCVT_F_XU_V">;
// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
-defm : VPatNConvertFP2ISDNode_W<fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
-defm : VPatNConvertFP2ISDNode_W<fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
-defm : VPatNConvertI2FPSDNode_W<sint_to_fp, "PseudoVFNCVT_F_X_W">;
-defm : VPatNConvertI2FPSDNode_W<uint_to_fp, "PseudoVFNCVT_F_XU_W">;
+defm : VPatNConvertFP2ISDNode_W<any_fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
+defm : VPatNConvertFP2ISDNode_W<any_fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
+defm : VPatNConvertI2FPSDNode_W<any_sint_to_fp, "PseudoVFNCVT_F_X_W">;
+defm : VPatNConvertI2FPSDNode_W<any_uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar fwti = fvtiToFWti.Wti;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 96238d7e2f8a2..03487a5f1c366 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -214,12 +214,31 @@ def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_
def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>;
+def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
+def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
+
+def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
+ [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl),
+ (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>;
+def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
+ [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl),
+ (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>;
+
// Int -> Float
def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>;
def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>;
+def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
+def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
+
+def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
+ [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl),
+ (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
+def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
+ [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl),
+ (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>;
@@ -950,7 +969,7 @@ multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix,
// Single width converting
-multiclass VPatConvertFP2IVL_V<SDNode vop, string instruction_name> {
+multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> {
foreach fvti = AllFloatVectors in {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
@@ -975,7 +994,7 @@ multiclass VPatConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
}
}
-multiclass VPatConvertI2FPVL_V<SDNode vop, string instruction_name> {
+multiclass VPatConvertI2FPVL_V<SDPatternOperator vop, string instruction_name> {
foreach fvti = AllFloatVectors in {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
@@ -1001,7 +1020,7 @@ multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
// Widening converting
-multiclass VPatWConvertFP2IVL_V<SDNode vop, string instruction_name> {
+multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> {
foreach fvtiToFWti = AllWidenableFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
@@ -1027,7 +1046,8 @@ multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
}
}
-multiclass VPatWConvertI2FPVL_V<SDNode vop, string instruction_name> {
+multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop,
+ string instruction_name> {
foreach vtiToWti = AllWidenableIntToFloatVectors in {
defvar ivti = vtiToWti.Vti;
defvar fwti = vtiToWti.Wti;
@@ -1055,7 +1075,8 @@ multiclass VPatWConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
// Narrowing converting
-multiclass VPatNConvertFP2IVL_W<SDNode vop, string instruction_name> {
+multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop,
+ string instruction_name> {
// Reuse the same list of types used in the widening nodes, but just swap the
// direction of types around so we're converting from Wti -> Vti
foreach vtiToWti = AllWidenableIntToFloatVectors in {
@@ -1083,7 +1104,8 @@ multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> {
}
}
-multiclass VPatNConvertI2FPVL_W<SDNode vop, string instruction_name> {
+multiclass VPatNConvertI2FPVL_W<SDPatternOperator vop,
+ string instruction_name> {
foreach fvtiToFWti = AllWidenableFloatVectors in {
defvar fvti = fvtiToFWti.Vti;
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
@@ -2002,11 +2024,11 @@ foreach fvti = AllFloatVectors in {
defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">;
defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">;
- defm : VPatConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
- defm : VPatConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;
+ defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
+ defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;
- defm : VPatConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
- defm : VPatConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;
+ defm : VPatConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
+ defm : VPatConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">;
defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">;
@@ -2017,11 +2039,11 @@ foreach fvti = AllFloatVectors in {
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">;
- defm : VPatWConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
- defm : VPatWConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;
+ defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
+ defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;
- defm : VPatWConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
- defm : VPatWConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;
+ defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
+ defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFWCVT_RM_F_XU_V">;
defm : VPatWConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFWCVT_RM_F_X_V">;
@@ -2044,11 +2066,11 @@ foreach fvti = AllFloatVectors in {
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">;
- defm : VPatNConvertFP2IVL_W<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
- defm : VPatNConvertFP2IVL_W<riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;
+ defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
+ defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;
- defm : VPatNConvertI2FPVL_W<riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
- defm : VPatNConvertI2FPVL_W<riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;
+ defm : VPatNConvertI2FPVL_W<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
+ defm : VPatNConvertI2FPVL_W<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
new file mode 100644
index 0000000000000..bbe900dc5d229
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
@@ -0,0 +1,1777 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f16(<1 x half>, metadata)
+define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) {
+; CHECK-LABEL: vfptosi_v1f16_v1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i1> %evec
+}
+
+declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f16(<1 x half>, metadata)
+define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) {
+; CHECK-LABEL: vfptoui_v1f16_v1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i1> %evec
+}
+
+declare <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half>, metadata)
+define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) {
+ %evec = call <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i7> %evec
+}
+
+declare <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half>, metadata)
+define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) {
+ %evec = call <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i7> %evec
+}
+
+declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f16(<1 x half>, metadata)
+define <1 x i8> @vfptosi_v1f16_v1i8(<1 x half> %va) {
+; CHECK-LABEL: vfptosi_v1f16_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i8> %evec
+}
+
+declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f16(<1 x half>, metadata)
+define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) {
+; CHECK-LABEL: vfptoui_v1f16_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i8> %evec
+}
+
+declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f16(<1 x half>, metadata)
+define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) {
+; CHECK-LABEL: vfptosi_v1f16_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i16> %evec
+}
+
+declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f16(<1 x half>, metadata)
+define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) {
+; CHECK-LABEL: vfptoui_v1f16_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i16> %evec
+}
+
+declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f16(<1 x half>, metadata)
+define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) {
+; CHECK-LABEL: vfptosi_v1f16_v1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i32> %evec
+}
+
+declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f16(<1 x half>, metadata)
+define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) {
+; CHECK-LABEL: vfptoui_v1f16_v1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i32> %evec
+}
+
+declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f16(<1 x half>, metadata)
+define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) {
+; CHECK-LABEL: vfptosi_v1f16_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i64> %evec
+}
+
+declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f16(<1 x half>, metadata)
+define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) {
+; CHECK-LABEL: vfptoui_v1f16_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f16(<1 x half> %va, metadata !"fpexcept.strict")
+ ret <1 x i64> %evec
+}
+
+declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f16(<2 x half>, metadata)
+define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) {
+; CHECK-LABEL: vfptosi_v2f16_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i1> %evec
+}
+
+declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f16(<2 x half>, metadata)
+define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) {
+; CHECK-LABEL: vfptoui_v2f16_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i1> %evec
+}
+
+declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f16(<2 x half>, metadata)
+define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) {
+; CHECK-LABEL: vfptosi_v2f16_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i8> %evec
+}
+
+declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f16(<2 x half>, metadata)
+define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) {
+; CHECK-LABEL: vfptoui_v2f16_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i8> %evec
+}
+
+declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half>, metadata)
+define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) {
+; CHECK-LABEL: vfptosi_v2f16_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i16> %evec
+}
+
+declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f16(<2 x half>, metadata)
+define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) {
+; CHECK-LABEL: vfptoui_v2f16_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i16> %evec
+}
+
+declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f16(<2 x half>, metadata)
+define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) {
+; CHECK-LABEL: vfptosi_v2f16_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i32> %evec
+}
+
+declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f16(<2 x half>, metadata)
+define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) {
+; CHECK-LABEL: vfptoui_v2f16_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i32> %evec
+}
+
+declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half>, metadata)
+define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) {
+; CHECK-LABEL: vfptosi_v2f16_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i64> %evec
+}
+
+declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half>, metadata)
+define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) {
+; CHECK-LABEL: vfptoui_v2f16_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half> %va, metadata !"fpexcept.strict")
+ ret <2 x i64> %evec
+}
+
+declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f16(<4 x half>, metadata)
+define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) {
+; CHECK-LABEL: vfptosi_v4f16_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i1> %evec
+}
+
+declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f16(<4 x half>, metadata)
+define <4 x i1> @vfptoui_v4f16_v4i1(<4 x half> %va) {
+; CHECK-LABEL: vfptoui_v4f16_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i1> %evec
+}
+
+declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f16(<4 x half>, metadata)
+define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) {
+; CHECK-LABEL: vfptosi_v4f16_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i8> %evec
+}
+
+declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f16(<4 x half>, metadata)
+define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) {
+; CHECK-LABEL: vfptoui_v4f16_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i8> %evec
+}
+
+declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f16(<4 x half>, metadata)
+define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) {
+; CHECK-LABEL: vfptosi_v4f16_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i16> %evec
+}
+
+declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f16(<4 x half>, metadata)
+define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) {
+; CHECK-LABEL: vfptoui_v4f16_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i16> %evec
+}
+
+declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f16(<4 x half>, metadata)
+define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) {
+; CHECK-LABEL: vfptosi_v4f16_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i32> %evec
+}
+
+declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f16(<4 x half>, metadata)
+define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) {
+; CHECK-LABEL: vfptoui_v4f16_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i32> %evec
+}
+
+declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f16(<4 x half>, metadata)
+define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) {
+; CHECK-LABEL: vfptosi_v4f16_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i64> %evec
+}
+
+declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f16(<4 x half>, metadata)
+define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) {
+; CHECK-LABEL: vfptoui_v4f16_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f16(<4 x half> %va, metadata !"fpexcept.strict")
+ ret <4 x i64> %evec
+}
+
+declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f16(<8 x half>, metadata)
+define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) {
+; CHECK-LABEL: vfptosi_v8f16_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i1> %evec
+}
+
+declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f16(<8 x half>, metadata)
+define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) {
+; CHECK-LABEL: vfptoui_v8f16_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i1> %evec
+}
+
+declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f16(<8 x half>, metadata)
+define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) {
+; CHECK-LABEL: vfptosi_v8f16_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i8> %evec
+}
+
+declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f16(<8 x half>, metadata)
+define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) {
+; CHECK-LABEL: vfptoui_v8f16_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i8> %evec
+}
+
+declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f16(<8 x half>, metadata)
+define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) {
+; CHECK-LABEL: vfptosi_v8f16_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i16> %evec
+}
+
+declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f16(<8 x half>, metadata)
+define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) {
+; CHECK-LABEL: vfptoui_v8f16_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i16> %evec
+}
+
+declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f16(<8 x half>, metadata)
+define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) {
+; CHECK-LABEL: vfptosi_v8f16_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i32> %evec
+}
+
+declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f16(<8 x half>, metadata)
+define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) {
+; CHECK-LABEL: vfptoui_v8f16_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i32> %evec
+}
+
+declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f16(<8 x half>, metadata)
+define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) {
+; CHECK-LABEL: vfptosi_v8f16_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i64> %evec
+}
+
+declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f16(<8 x half>, metadata)
+define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) {
+; CHECK-LABEL: vfptoui_v8f16_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f16(<8 x half> %va, metadata !"fpexcept.strict")
+ ret <8 x i64> %evec
+}
+
+declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f16(<16 x half>, metadata)
+define <16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) {
+; CHECK-LABEL: vfptosi_v16f16_v16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i1> %evec
+}
+
+declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f16(<16 x half>, metadata)
+define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) {
+; CHECK-LABEL: vfptoui_v16f16_v16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i1> %evec
+}
+
+declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f16(<16 x half>, metadata)
+define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) {
+; CHECK-LABEL: vfptosi_v16f16_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i8> %evec
+}
+
+declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f16(<16 x half>, metadata)
+define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) {
+; CHECK-LABEL: vfptoui_v16f16_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i8> %evec
+}
+
+declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f16(<16 x half>, metadata)
+define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) {
+; CHECK-LABEL: vfptosi_v16f16_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i16> %evec
+}
+
+declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f16(<16 x half>, metadata)
+define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) {
+; CHECK-LABEL: vfptoui_v16f16_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i16> %evec
+}
+
+declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f16(<16 x half>, metadata)
+define <16 x i32> @vfptosi_v16f16_v16i32(<16 x half> %va) {
+; CHECK-LABEL: vfptosi_v16f16_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i32> %evec
+}
+
+declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f16(<16 x half>, metadata)
+define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) {
+; CHECK-LABEL: vfptoui_v16f16_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f16(<16 x half> %va, metadata !"fpexcept.strict")
+ ret <16 x i32> %evec
+}
+
+declare <32 x i1> @llvm.experimental.constrained.fptosi.v32i1.v32f16(<32 x half>, metadata)
+define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) {
+; CHECK-LABEL: vfptosi_v32f16_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <32 x i1> @llvm.experimental.constrained.fptosi.v32i1.v32f16(<32 x half> %va, metadata !"fpexcept.strict")
+ ret <32 x i1> %evec
+}
+
+declare <32 x i1> @llvm.experimental.constrained.fptoui.v32i1.v32f16(<32 x half>, metadata)
+define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) {
+; CHECK-LABEL: vfptoui_v32f16_v32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <32 x i1> @llvm.experimental.constrained.fptoui.v32i1.v32f16(<32 x half> %va, metadata !"fpexcept.strict")
+ ret <32 x i1> %evec
+}
+
+declare <32 x i8> @llvm.experimental.constrained.fptosi.v32i8.v32f16(<32 x half>, metadata)
+define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) {
+; CHECK-LABEL: vfptosi_v32f16_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <32 x i8> @llvm.experimental.constrained.fptosi.v32i8.v32f16(<32 x half> %va, metadata !"fpexcept.strict")
+ ret <32 x i8> %evec
+}
+
+declare <32 x i8> @llvm.experimental.constrained.fptoui.v32i8.v32f16(<32 x half>, metadata)
+define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) {
+; CHECK-LABEL: vfptoui_v32f16_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <32 x i8> @llvm.experimental.constrained.fptoui.v32i8.v32f16(<32 x half> %va, metadata !"fpexcept.strict")
+ ret <32 x i8> %evec
+}
+
+declare <32 x i16> @llvm.experimental.constrained.fptosi.v32i16.v32f16(<32 x half>, metadata)
+define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) {
+; CHECK-LABEL: vfptosi_v32f16_v32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <32 x i16> @llvm.experimental.constrained.fptosi.v32i16.v32f16(<32 x half> %va, metadata !"fpexcept.strict")
+ ret <32 x i16> %evec
+}
+
+declare <32 x i16> @llvm.experimental.constrained.fptoui.v32i16.v32f16(<32 x half>, metadata)
+define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) {
+; CHECK-LABEL: vfptoui_v32f16_v32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <32 x i16> @llvm.experimental.constrained.fptoui.v32i16.v32f16(<32 x half> %va, metadata !"fpexcept.strict")
+ ret <32 x i16> %evec
+}
+
+declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f32(<1 x float>, metadata)
+define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) {
+; CHECK-LABEL: vfptosi_v1f32_v1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i1> %evec
+}
+
+declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f32(<1 x float>, metadata)
+define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) {
+; CHECK-LABEL: vfptoui_v1f32_v1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i1> %evec
+}
+
+declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f32(<1 x float>, metadata)
+define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) {
+; CHECK-LABEL: vfptosi_v1f32_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i8> %evec
+}
+
+declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f32(<1 x float>, metadata)
+define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) {
+; CHECK-LABEL: vfptoui_v1f32_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i8> %evec
+}
+
+declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f32(<1 x float>, metadata)
+define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) {
+; CHECK-LABEL: vfptosi_v1f32_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i16> %evec
+}
+
+declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f32(<1 x float>, metadata)
+define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) {
+; CHECK-LABEL: vfptoui_v1f32_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i16> %evec
+}
+
+declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(<1 x float>, metadata)
+define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) {
+; CHECK-LABEL: vfptosi_v1f32_v1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i32> %evec
+}
+
+declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(<1 x float>, metadata)
+define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) {
+; CHECK-LABEL: vfptoui_v1f32_v1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i32> %evec
+}
+
+declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(<1 x float>, metadata)
+define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) {
+; CHECK-LABEL: vfptosi_v1f32_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i64> %evec
+}
+
+declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(<1 x float>, metadata)
+define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) {
+; CHECK-LABEL: vfptoui_v1f32_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(<1 x float> %va, metadata !"fpexcept.strict")
+ ret <1 x i64> %evec
+}
+
+declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f32(<2 x float>, metadata)
+define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) {
+; CHECK-LABEL: vfptosi_v2f32_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i1> %evec
+}
+
+declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f32(<2 x float>, metadata)
+define <2 x i1> @vfptoui_v2f32_v2i1(<2 x float> %va) {
+; CHECK-LABEL: vfptoui_v2f32_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i1> %evec
+}
+
+declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f32(<2 x float>, metadata)
+define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) {
+; CHECK-LABEL: vfptosi_v2f32_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i8> %evec
+}
+
+declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f32(<2 x float>, metadata)
+define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) {
+; CHECK-LABEL: vfptoui_v2f32_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i8> %evec
+}
+
+declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f32(<2 x float>, metadata)
+define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) {
+; CHECK-LABEL: vfptosi_v2f32_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i16> %evec
+}
+
+declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f32(<2 x float>, metadata)
+define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) {
+; CHECK-LABEL: vfptoui_v2f32_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i16> %evec
+}
+
+declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float>, metadata)
+define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) {
+; CHECK-LABEL: vfptosi_v2f32_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i32> %evec
+}
+
+declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata)
+define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) {
+; CHECK-LABEL: vfptoui_v2f32_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i32> %evec
+}
+
+declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata)
+define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) {
+; CHECK-LABEL: vfptosi_v2f32_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i64> %evec
+}
+
+declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>, metadata)
+define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) {
+; CHECK-LABEL: vfptoui_v2f32_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
+ ret <2 x i64> %evec
+}
+
+declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f32(<4 x float>, metadata)
+define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) {
+; CHECK-LABEL: vfptosi_v4f32_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i1> %evec
+}
+
+declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f32(<4 x float>, metadata)
+define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) {
+; CHECK-LABEL: vfptoui_v4f32_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i1> %evec
+}
+
+declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f32(<4 x float>, metadata)
+define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) {
+; CHECK-LABEL: vfptosi_v4f32_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i8> %evec
+}
+
+declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f32(<4 x float>, metadata)
+define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) {
+; CHECK-LABEL: vfptoui_v4f32_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i8> %evec
+}
+
+declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f32(<4 x float>, metadata)
+define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) {
+; CHECK-LABEL: vfptosi_v4f32_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i16> %evec
+}
+
+declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f32(<4 x float>, metadata)
+define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) {
+; CHECK-LABEL: vfptoui_v4f32_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i16> %evec
+}
+
+declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
+define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) {
+; CHECK-LABEL: vfptosi_v4f32_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i32> %evec
+}
+
+declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
+define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) {
+; CHECK-LABEL: vfptoui_v4f32_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i32> %evec
+}
+
+declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
+define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) {
+; CHECK-LABEL: vfptosi_v4f32_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i64> %evec
+}
+
+declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
+define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) {
+; CHECK-LABEL: vfptoui_v4f32_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float> %va, metadata !"fpexcept.strict")
+ ret <4 x i64> %evec
+}
+
+declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f32(<8 x float>, metadata)
+define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) {
+; CHECK-LABEL: vfptosi_v8f32_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i1> %evec
+}
+
+declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f32(<8 x float>, metadata)
+define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) {
+; CHECK-LABEL: vfptoui_v8f32_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i1> %evec
+}
+
+declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f32(<8 x float>, metadata)
+define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) {
+; CHECK-LABEL: vfptosi_v8f32_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i8> %evec
+}
+
+declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f32(<8 x float>, metadata)
+define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) {
+; CHECK-LABEL: vfptoui_v8f32_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i8> %evec
+}
+
+declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f32(<8 x float>, metadata)
+define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) {
+; CHECK-LABEL: vfptosi_v8f32_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i16> %evec
+}
+
+declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f32(<8 x float>, metadata)
+define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) {
+; CHECK-LABEL: vfptoui_v8f32_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i16> %evec
+}
+
+declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f32(<8 x float>, metadata)
+define <8 x i32> @vfptosi_v8f32_v8i32(<8 x float> %va) {
+; CHECK-LABEL: vfptosi_v8f32_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i32> %evec
+}
+
+declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f32(<8 x float>, metadata)
+define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) {
+; CHECK-LABEL: vfptoui_v8f32_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i32> %evec
+}
+
+declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f32(<8 x float>, metadata)
+define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) {
+; CHECK-LABEL: vfptosi_v8f32_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i64> %evec
+}
+
+declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f32(<8 x float>, metadata)
+define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) {
+; CHECK-LABEL: vfptoui_v8f32_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f32(<8 x float> %va, metadata !"fpexcept.strict")
+ ret <8 x i64> %evec
+}
+
+declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f32(<16 x float>, metadata)
+define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) {
+; CHECK-LABEL: vfptosi_v16f32_v16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i1> %evec
+}
+
+declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f32(<16 x float>, metadata)
+define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) {
+; CHECK-LABEL: vfptoui_v16f32_v16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i1> %evec
+}
+
+declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f32(<16 x float>, metadata)
+define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) {
+; CHECK-LABEL: vfptosi_v16f32_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i8> %evec
+}
+
+declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f32(<16 x float>, metadata)
+define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) {
+; CHECK-LABEL: vfptoui_v16f32_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i8> %evec
+}
+
+declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f32(<16 x float>, metadata)
+define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) {
+; CHECK-LABEL: vfptosi_v16f32_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i16> %evec
+}
+
+declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f32(<16 x float>, metadata)
+define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) {
+; CHECK-LABEL: vfptoui_v16f32_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i16> %evec
+}
+
+declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float>, metadata)
+define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) {
+; CHECK-LABEL: vfptosi_v16f32_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i32> %evec
+}
+
+declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float>, metadata)
+define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) {
+; CHECK-LABEL: vfptoui_v16f32_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float> %va, metadata !"fpexcept.strict")
+ ret <16 x i32> %evec
+}
+
+declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f64(<1 x double>, metadata)
+define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) {
+; CHECK-LABEL: vfptosi_v1f64_v1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i1> %evec
+}
+
+declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f64(<1 x double>, metadata)
+define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) {
+; CHECK-LABEL: vfptoui_v1f64_v1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i1> %evec
+}
+
+declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f64(<1 x double>, metadata)
+define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) {
+; CHECK-LABEL: vfptosi_v1f64_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i8> %evec
+}
+
+declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f64(<1 x double>, metadata)
+define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) {
+; CHECK-LABEL: vfptoui_v1f64_v1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i8> %evec
+}
+
+declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f64(<1 x double>, metadata)
+define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) {
+; CHECK-LABEL: vfptosi_v1f64_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i16> %evec
+}
+
+declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f64(<1 x double>, metadata)
+define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) {
+; CHECK-LABEL: vfptoui_v1f64_v1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i16> %evec
+}
+
+declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
+define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) {
+; CHECK-LABEL: vfptosi_v1f64_v1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i32> %evec
+}
+
+declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
+define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) {
+; CHECK-LABEL: vfptoui_v1f64_v1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i32> %evec
+}
+
+declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
+define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) {
+; CHECK-LABEL: vfptosi_v1f64_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i64> %evec
+}
+
+declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
+define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) {
+; CHECK-LABEL: vfptoui_v1f64_v1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %va, metadata !"fpexcept.strict")
+ ret <1 x i64> %evec
+}
+
+declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f64(<2 x double>, metadata)
+define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) {
+; CHECK-LABEL: vfptosi_v2f64_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i1> %evec
+}
+
+declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f64(<2 x double>, metadata)
+define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) {
+; CHECK-LABEL: vfptoui_v2f64_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i1> %evec
+}
+
+declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f64(<2 x double>, metadata)
+define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) {
+; CHECK-LABEL: vfptosi_v2f64_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i8> %evec
+}
+
+declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f64(<2 x double>, metadata)
+define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) {
+; CHECK-LABEL: vfptoui_v2f64_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i8> %evec
+}
+
+declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f64(<2 x double>, metadata)
+define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) {
+; CHECK-LABEL: vfptosi_v2f64_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i16> %evec
+}
+
+declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f64(<2 x double>, metadata)
+define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) {
+; CHECK-LABEL: vfptoui_v2f64_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i16> %evec
+}
+
+declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
+define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) {
+; CHECK-LABEL: vfptosi_v2f64_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i32> %evec
+}
+
+declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
+define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) {
+; CHECK-LABEL: vfptoui_v2f64_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i32> %evec
+}
+
+declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
+define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) {
+; CHECK-LABEL: vfptosi_v2f64_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i64> %evec
+}
+
+declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
+define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) {
+; CHECK-LABEL: vfptoui_v2f64_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %va, metadata !"fpexcept.strict")
+ ret <2 x i64> %evec
+}
+
+declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f64(<4 x double>, metadata)
+define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) {
+; CHECK-LABEL: vfptosi_v4f64_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i1> %evec
+}
+
+declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f64(<4 x double>, metadata)
+define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) {
+; CHECK-LABEL: vfptoui_v4f64_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i1> %evec
+}
+
+declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f64(<4 x double>, metadata)
+define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) {
+; CHECK-LABEL: vfptosi_v4f64_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i8> %evec
+}
+
+declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f64(<4 x double>, metadata)
+define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) {
+; CHECK-LABEL: vfptoui_v4f64_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i8> %evec
+}
+
+declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f64(<4 x double>, metadata)
+define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) {
+; CHECK-LABEL: vfptosi_v4f64_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i16> %evec
+}
+
+declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f64(<4 x double>, metadata)
+define <4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) {
+; CHECK-LABEL: vfptoui_v4f64_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i16> %evec
+}
+
+declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double>, metadata)
+define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) {
+; CHECK-LABEL: vfptosi_v4f64_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i32> %evec
+}
+
+declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double>, metadata)
+define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) {
+; CHECK-LABEL: vfptoui_v4f64_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i32> %evec
+}
+
+declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double>, metadata)
+define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) {
+; CHECK-LABEL: vfptosi_v4f64_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i64> %evec
+}
+
+declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double>, metadata)
+define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) {
+; CHECK-LABEL: vfptoui_v4f64_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double> %va, metadata !"fpexcept.strict")
+ ret <4 x i64> %evec
+}
+
+declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f64(<8 x double>, metadata)
+define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) {
+; CHECK-LABEL: vfptosi_v8f64_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i1> %evec
+}
+
+declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f64(<8 x double>, metadata)
+define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) {
+; CHECK-LABEL: vfptoui_v8f64_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i1> %evec
+}
+
+declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f64(<8 x double>, metadata)
+define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) {
+; CHECK-LABEL: vfptosi_v8f64_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i8> %evec
+}
+
+declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f64(<8 x double>, metadata)
+define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) {
+; CHECK-LABEL: vfptoui_v8f64_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i8> %evec
+}
+
+declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f64(<8 x double>, metadata)
+define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) {
+; CHECK-LABEL: vfptosi_v8f64_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i16> %evec
+}
+
+declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f64(<8 x double>, metadata)
+define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) {
+; CHECK-LABEL: vfptoui_v8f64_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i16> %evec
+}
+
+declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double>, metadata)
+define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) {
+; CHECK-LABEL: vfptosi_v8f64_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i32> %evec
+}
+
+declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f64(<8 x double>, metadata)
+define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) {
+; CHECK-LABEL: vfptoui_v8f64_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i32> %evec
+}
+
+declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double>, metadata)
+define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) {
+; CHECK-LABEL: vfptosi_v8f64_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i64> %evec
+}
+
+declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double>, metadata)
+define <8 x i64> @vfptoui_v8f64_v8i64(<8 x double> %va) {
+; CHECK-LABEL: vfptoui_v8f64_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double> %va, metadata !"fpexcept.strict")
+ ret <8 x i64> %evec
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
new file mode 100644
index 0000000000000..29d4735fb2354
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
@@ -0,0 +1,1748 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
+define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) {
+; CHECK-LABEL: vsitofp_v1i1_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
+define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) {
+; CHECK-LABEL: vuitofp_v1i1_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
+define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) {
+; CHECK-LABEL: vsitofp_v1i1_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
+define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) {
+; CHECK-LABEL: vuitofp_v1i1_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
+define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) {
+; CHECK-LABEL: vsitofp_v1i1_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
+define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) {
+; CHECK-LABEL: vuitofp_v1i1_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
+define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) {
+; CHECK-LABEL: vsitofp_v2i1_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
+define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) {
+; CHECK-LABEL: vuitofp_v2i1_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
+define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) {
+; CHECK-LABEL: vsitofp_v2i1_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
+define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) {
+; CHECK-LABEL: vuitofp_v2i1_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
+define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) {
+; CHECK-LABEL: vsitofp_v2i1_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
+define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) {
+; CHECK-LABEL: vuitofp_v2i1_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
+define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) {
+; CHECK-LABEL: vsitofp_v4i1_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
+define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) {
+; CHECK-LABEL: vuitofp_v4i1_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
+define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) {
+; CHECK-LABEL: vsitofp_v4i1_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
+define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) {
+; CHECK-LABEL: vuitofp_v4i1_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
+define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) {
+; CHECK-LABEL: vsitofp_v4i1_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
+define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) {
+; CHECK-LABEL: vuitofp_v4i1_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
+define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) {
+; CHECK-LABEL: vsitofp_v8i1_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
+define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) {
+; CHECK-LABEL: vuitofp_v8i1_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
+define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) {
+; CHECK-LABEL: vsitofp_v8i1_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
+define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) {
+; CHECK-LABEL: vuitofp_v8i1_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
+define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) {
+; CHECK-LABEL: vsitofp_v8i1_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
+define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) {
+; CHECK-LABEL: vuitofp_v8i1_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
+define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) {
+; CHECK-LABEL: vsitofp_v16i1_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
+define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) {
+; CHECK-LABEL: vuitofp_v16i1_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
+define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) {
+; CHECK-LABEL: vsitofp_v16i1_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
+define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) {
+; CHECK-LABEL: vuitofp_v16i1_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
+declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
+define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) {
+; CHECK-LABEL: vsitofp_v32i1_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <32 x half> %evec
+}
+
+declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
+define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) {
+; CHECK-LABEL: vuitofp_v32i1_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <32 x half> %evec
+}
+
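+; i8 sources: a single widening convert (vfwcvt.f.x[u].v) reaches f16; for f32
+; and f64 the input is first sign/zero-extended with vsext/vzext.vf2 or .vf4 so
+; that the final step is still a single widening convert.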
+declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
+define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) {
+; CHECK-LABEL: vsitofp_v1i8_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
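+; Note: no CHECK lines are emitted for vsitofp_v1i7_v1f16; the i7 sign
+; extension differs between RV32 and RV64, so there is no output common to the
+; shared CHECK prefix (presumably why update_llc_test_checks.py left it blank).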
+define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) {
+ %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
+define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) {
+; CHECK-LABEL: vuitofp_v1i7_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: andi a0, a0, 127
+; CHECK-NEXT: fcvt.h.wu fa5, a0
+; CHECK-NEXT: fsh fa5, 14(sp)
+; CHECK-NEXT: addi a0, sp, 14
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
+define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) {
+; CHECK-LABEL: vuitofp_v1i8_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
+define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) {
+; CHECK-LABEL: vsitofp_v1i8_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
+define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) {
+; CHECK-LABEL: vuitofp_v1i8_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
+define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) {
+; CHECK-LABEL: vsitofp_v1i8_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
+define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) {
+; CHECK-LABEL: vuitofp_v1i8_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
+define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) {
+; CHECK-LABEL: vsitofp_v2i8_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
+define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) {
+; CHECK-LABEL: vuitofp_v2i8_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
+define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) {
+; CHECK-LABEL: vsitofp_v2i8_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
+define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) {
+; CHECK-LABEL: vuitofp_v2i8_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
+define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) {
+; CHECK-LABEL: vsitofp_v2i8_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
+define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) {
+; CHECK-LABEL: vuitofp_v2i8_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
+define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) {
+; CHECK-LABEL: vsitofp_v4i8_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
+define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) {
+; CHECK-LABEL: vuitofp_v4i8_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
+define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) {
+; CHECK-LABEL: vsitofp_v4i8_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
+define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) {
+; CHECK-LABEL: vuitofp_v4i8_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
+define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) {
+; CHECK-LABEL: vsitofp_v4i8_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
+define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) {
+; CHECK-LABEL: vuitofp_v4i8_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
+define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) {
+; CHECK-LABEL: vsitofp_v8i8_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
+define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) {
+; CHECK-LABEL: vuitofp_v8i8_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
+define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) {
+; CHECK-LABEL: vsitofp_v8i8_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
+define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) {
+; CHECK-LABEL: vuitofp_v8i8_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
+define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) {
+; CHECK-LABEL: vsitofp_v8i8_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
+define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) {
+; CHECK-LABEL: vuitofp_v8i8_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
+define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) {
+; CHECK-LABEL: vsitofp_v16i8_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
+define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) {
+; CHECK-LABEL: vuitofp_v16i8_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
+define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) {
+; CHECK-LABEL: vsitofp_v16i8_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
+define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) {
+; CHECK-LABEL: vuitofp_v16i8_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
+declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
+define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) {
+; CHECK-LABEL: vsitofp_v32i8_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <32 x half> %evec
+}
+
+declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
+define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) {
+; CHECK-LABEL: vuitofp_v32i8_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <32 x half> %evec
+}
+
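+; i16 sources: f16 is a same-width vfcvt.f.x[u].v, f32 a single widening
+; convert, and f64 an extend-by-2 (vsext/vzext.vf2) followed by a widening
+; convert.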
+declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
+define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) {
+; CHECK-LABEL: vsitofp_v1i16_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
+define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) {
+; CHECK-LABEL: vuitofp_v1i16_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
+define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) {
+; CHECK-LABEL: vsitofp_v1i16_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
+define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) {
+; CHECK-LABEL: vuitofp_v1i16_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
+define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) {
+; CHECK-LABEL: vsitofp_v1i16_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
+define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) {
+; CHECK-LABEL: vuitofp_v1i16_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
+define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) {
+; CHECK-LABEL: vsitofp_v2i16_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
+define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) {
+; CHECK-LABEL: vuitofp_v2i16_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
+define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) {
+; CHECK-LABEL: vsitofp_v2i16_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
+define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) {
+; CHECK-LABEL: vuitofp_v2i16_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
+define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) {
+; CHECK-LABEL: vsitofp_v2i16_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
+define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) {
+; CHECK-LABEL: vuitofp_v2i16_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
+define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) {
+; CHECK-LABEL: vsitofp_v4i16_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
+define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) {
+; CHECK-LABEL: vuitofp_v4i16_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
+define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) {
+; CHECK-LABEL: vsitofp_v4i16_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
+define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) {
+; CHECK-LABEL: vuitofp_v4i16_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
+define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) {
+; CHECK-LABEL: vsitofp_v4i16_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
+define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) {
+; CHECK-LABEL: vuitofp_v4i16_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
+define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) {
+; CHECK-LABEL: vsitofp_v8i16_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
+define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) {
+; CHECK-LABEL: vuitofp_v8i16_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
+define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) {
+; CHECK-LABEL: vsitofp_v8i16_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
+define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) {
+; CHECK-LABEL: vuitofp_v8i16_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
+define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) {
+; CHECK-LABEL: vsitofp_v8i16_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
+define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) {
+; CHECK-LABEL: vuitofp_v8i16_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
+define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) {
+; CHECK-LABEL: vsitofp_v16i16_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
+define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) {
+; CHECK-LABEL: vuitofp_v16i16_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
+define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) {
+; CHECK-LABEL: vsitofp_v16i16_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
+define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) {
+; CHECK-LABEL: vuitofp_v16i16_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
+declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
+define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) {
+; CHECK-LABEL: vsitofp_v32i16_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <32 x half> %evec
+}
+
+declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
+define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) {
+; CHECK-LABEL: vuitofp_v32i16_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <32 x half> %evec
+}
+
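+; i32 sources: f16 requires a narrowing vfncvt.f.x[u].w, f32 a same-width
+; vfcvt, and f64 a single widening vfwcvt.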
+declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
+define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) {
+; CHECK-LABEL: vsitofp_v1i32_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
+define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) {
+; CHECK-LABEL: vuitofp_v1i32_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
+define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) {
+; CHECK-LABEL: vsitofp_v1i32_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
+define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) {
+; CHECK-LABEL: vuitofp_v1i32_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
+define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) {
+; CHECK-LABEL: vsitofp_v1i32_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
+define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) {
+; CHECK-LABEL: vuitofp_v1i32_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
+define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) {
+; CHECK-LABEL: vsitofp_v2i32_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
+define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) {
+; CHECK-LABEL: vuitofp_v2i32_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
+define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) {
+; CHECK-LABEL: vsitofp_v2i32_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
+define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) {
+; CHECK-LABEL: vuitofp_v2i32_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
+define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) {
+; CHECK-LABEL: vsitofp_v2i32_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
+define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) {
+; CHECK-LABEL: vuitofp_v2i32_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
+define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) {
+; CHECK-LABEL: vsitofp_v4i32_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
+define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) {
+; CHECK-LABEL: vuitofp_v4i32_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
+define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) {
+; CHECK-LABEL: vsitofp_v4i32_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
+define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) {
+; CHECK-LABEL: vuitofp_v4i32_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
+define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) {
+; CHECK-LABEL: vsitofp_v4i32_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
+define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) {
+; CHECK-LABEL: vuitofp_v4i32_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
+define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) {
+; CHECK-LABEL: vsitofp_v8i32_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
+define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) {
+; CHECK-LABEL: vuitofp_v8i32_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
+define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) {
+; CHECK-LABEL: vsitofp_v8i32_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
+define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) {
+; CHECK-LABEL: vuitofp_v8i32_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
+define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) {
+; CHECK-LABEL: vsitofp_v8i32_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
+define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) {
+; CHECK-LABEL: vuitofp_v8i32_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
+define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) {
+; CHECK-LABEL: vsitofp_v16i32_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
+define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) {
+; CHECK-LABEL: vuitofp_v16i32_v16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
+define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) {
+; CHECK-LABEL: vsitofp_v16i32_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
+declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
+define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) {
+; CHECK-LABEL: vuitofp_v16i32_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x float> %evec
+}
+
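+; i64 sources: f64 is a same-width vfcvt and f32 a single narrowing vfncvt;
+; f16 needs two steps, a narrowing int-to-f32 convert followed by an
+; f32-to-f16 vfncvt.f.f.w.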
+declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
+define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) {
+; CHECK-LABEL: vsitofp_v1i64_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
+define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) {
+; CHECK-LABEL: vuitofp_v1i64_v1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x half> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
+define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) {
+; CHECK-LABEL: vsitofp_v1i64_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
+define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) {
+; CHECK-LABEL: vuitofp_v1i64_v1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x float> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
+define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) {
+; CHECK-LABEL: vsitofp_v1i64_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
+define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) {
+; CHECK-LABEL: vuitofp_v1i64_v1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <1 x double> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
+define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) {
+; CHECK-LABEL: vsitofp_v2i64_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
+define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) {
+; CHECK-LABEL: vuitofp_v2i64_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x half> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
+define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) {
+; CHECK-LABEL: vsitofp_v2i64_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
+define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) {
+; CHECK-LABEL: vuitofp_v2i64_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x float> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
+define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) {
+; CHECK-LABEL: vsitofp_v2i64_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
+define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) {
+; CHECK-LABEL: vuitofp_v2i64_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <2 x double> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
+define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) {
+; CHECK-LABEL: vsitofp_v4i64_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
+define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) {
+; CHECK-LABEL: vuitofp_v4i64_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x half> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
+define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) {
+; CHECK-LABEL: vsitofp_v4i64_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
+define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) {
+; CHECK-LABEL: vuitofp_v4i64_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x float> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
+define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) {
+; CHECK-LABEL: vsitofp_v4i64_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
+define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) {
+; CHECK-LABEL: vuitofp_v4i64_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <4 x double> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
+define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) {
+; CHECK-LABEL: vsitofp_v8i64_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
+define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) {
+; CHECK-LABEL: vuitofp_v8i64_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
+define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) {
+; CHECK-LABEL: vsitofp_v8i64_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
+define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) {
+; CHECK-LABEL: vuitofp_v8i64_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x float> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
+define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) {
+; CHECK-LABEL: vsitofp_v8i64_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
+
+declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
+define <8 x double> @vuitofp_v8i64_v8f64(<8 x i64> %va) {
+; CHECK-LABEL: vuitofp_v8i64_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x double> %evec
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll
new file mode 100644
index 0000000000000..3d27321439757
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll
@@ -0,0 +1,1783 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
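+; Conversions to i1 are lowered as a narrowing convert to i8, a vand.vi to keep
+; only the low bit, and a vmsne.vi that turns the nonzero lanes into a mask in v0.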
+declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i1> @vfptosi_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv1f16_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i1> %evec
+}
+
+declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i1> @vfptoui_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv1f16_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i1> %evec
+}
+
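+; i7 is an illegal element type and is promoted to i8. Every in-range i7 result
+; also fits in the signed i8 range, so both the fptosi and the fptoui case can
+; use the signed narrowing convert vfncvt.rtz.x.f.w.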
+declare <vscale x 1 x i7> @llvm.experimental.constrained.fptosi.nxv1i7.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i7> @vfptosi_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv1f16_nxv1i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i7> @llvm.experimental.constrained.fptosi.nxv1i7.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i7> %evec
+}
+
+declare <vscale x 1 x i7> @llvm.experimental.constrained.fptoui.nxv1i7.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i7> @vfptoui_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv1f16_nxv1i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i7> @llvm.experimental.constrained.fptoui.nxv1i7.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i7> %evec
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i8> @vfptosi_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv1f16_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i8> %evec
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i8> @vfptoui_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv1f16_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i8> %evec
+}
+
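+; Same-width conversions use the single-width vfcvt.rtz form and can operate in
+; place on v8, with no extra register move.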
+declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i16> @vfptosi_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv1f16_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i16> %evec
+}
+
+declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i16> @vfptoui_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv1f16_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i16> %evec
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i32> @vfptosi_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv1f16_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i32> %evec
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i32> @vfptoui_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv1f16_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i32> %evec
+}
+
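+; f16 to i64 quadruples the element width, but RVV converts only double it, so
+; the source is first widened to f32 with vfwcvt.f.f.v and then converted with a
+; widening fp-to-int convert.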
+declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i64> @vfptosi_nxv1f16_nxv1i64(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv1f16_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i64> %evec
+}
+
+declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f16(<vscale x 1 x half>, metadata)
+define <vscale x 1 x i64> @vfptoui_nxv1f16_nxv1i64(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv1f16_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f16(<vscale x 1 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i64> %evec
+}
+
+declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i1> @vfptosi_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv2f16_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i1> %evec
+}
+
+declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i1> @vfptoui_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv2f16_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i1> %evec
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i8> @vfptosi_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv2f16_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i8> %evec
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i8> @vfptoui_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv2f16_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i8> %evec
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i16> @vfptosi_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv2f16_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i16> %evec
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i16> @vfptoui_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv2f16_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i16> %evec
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i32> @vfptosi_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv2f16_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i32> %evec
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i32> @vfptoui_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv2f16_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i32> %evec
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i64> @vfptosi_nxv2f16_nxv2i64(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv2f16_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i64> %evec
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half>, metadata)
+define <vscale x 2 x i64> @vfptoui_nxv2f16_nxv2i64(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv2f16_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i64> %evec
+}
+
+declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i1> @vfptosi_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv4f16_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i1> %evec
+}
+
+declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i1> @vfptoui_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv4f16_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i1> %evec
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i8> @vfptosi_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv4f16_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i8> %evec
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i8> @vfptoui_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv4f16_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i8> %evec
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i16> @vfptosi_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv4f16_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i16> %evec
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i16> @vfptoui_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv4f16_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i16> %evec
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i32> @vfptosi_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv4f16_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i32> %evec
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i32> @vfptoui_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv4f16_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i32> %evec
+}
+
+declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i64> @vfptosi_nxv4f16_nxv4i64(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv4f16_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i64> %evec
+}
+
+declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f16(<vscale x 4 x half>, metadata)
+define <vscale x 4 x i64> @vfptoui_nxv4f16_nxv4i64(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv4f16_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f16(<vscale x 4 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i64> %evec
+}
+
+declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i1> @vfptosi_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv8f16_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i1> %evec
+}
+
+declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i1> @vfptoui_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv8f16_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i1> %evec
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i8> @vfptosi_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv8f16_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i8> %evec
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i8> @vfptoui_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv8f16_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i8> %evec
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i16> @vfptosi_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv8f16_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i16> %evec
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i16> @vfptoui_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv8f16_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i16> %evec
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i32> @vfptosi_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv8f16_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i32> %evec
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i32> @vfptoui_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv8f16_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i32> %evec
+}
+
+declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i64> @vfptosi_nxv8f16_nxv8i64(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv8f16_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i64> %evec
+}
+
+declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f16(<vscale x 8 x half>, metadata)
+define <vscale x 8 x i64> @vfptoui_nxv8f16_nxv8i64(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv8f16_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f16(<vscale x 8 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i64> %evec
+}
+
+declare <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i1> @vfptosi_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv16f16_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i1> %evec
+}
+
+declare <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i1> @vfptoui_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv16f16_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i1> %evec
+}
+
+declare <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i8> @vfptosi_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv16f16_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i8> %evec
+}
+
+declare <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i8> @vfptoui_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv16f16_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i8> %evec
+}
+
+declare <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i16> @vfptosi_nxv16f16_nxv16i16(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv16f16_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i16> %evec
+}
+
+declare <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i16> @vfptoui_nxv16f16_nxv16i16(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv16f16_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i16> %evec
+}
+
+declare <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i32> @vfptosi_nxv16f16_nxv16i32(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv16f16_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i32> %evec
+}
+
+declare <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f16(<vscale x 16 x half>, metadata)
+define <vscale x 16 x i32> @vfptoui_nxv16f16_nxv16i32(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv16f16_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f16(<vscale x 16 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i32> %evec
+}
+
+declare <vscale x 32 x i1> @llvm.experimental.constrained.fptosi.nxv32i1.nxv32f16(<vscale x 32 x half>, metadata)
+define <vscale x 32 x i1> @vfptosi_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv32f16_nxv32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x i1> @llvm.experimental.constrained.fptosi.nxv32i1.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 32 x i1> %evec
+}
+
+declare <vscale x 32 x i1> @llvm.experimental.constrained.fptoui.nxv32i1.nxv32f16(<vscale x 32 x half>, metadata)
+define <vscale x 32 x i1> @vfptoui_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv32f16_nxv32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x i1> @llvm.experimental.constrained.fptoui.nxv32i1.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 32 x i1> %evec
+}
+
+declare <vscale x 32 x i8> @llvm.experimental.constrained.fptosi.nxv32i8.nxv32f16(<vscale x 32 x half>, metadata)
+define <vscale x 32 x i8> @vfptosi_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv32f16_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x i8> @llvm.experimental.constrained.fptosi.nxv32i8.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 32 x i8> %evec
+}
+
+declare <vscale x 32 x i8> @llvm.experimental.constrained.fptoui.nxv32i8.nxv32f16(<vscale x 32 x half>, metadata)
+define <vscale x 32 x i8> @vfptoui_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv32f16_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x i8> @llvm.experimental.constrained.fptoui.nxv32i8.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 32 x i8> %evec
+}
+
+declare <vscale x 32 x i16> @llvm.experimental.constrained.fptosi.nxv32i16.nxv32f16(<vscale x 32 x half>, metadata)
+define <vscale x 32 x i16> @vfptosi_nxv32f16_nxv32i16(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfptosi_nxv32f16_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x i16> @llvm.experimental.constrained.fptosi.nxv32i16.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 32 x i16> %evec
+}
+
+declare <vscale x 32 x i16> @llvm.experimental.constrained.fptoui.nxv32i16.nxv32f16(<vscale x 32 x half>, metadata)
+define <vscale x 32 x i16> @vfptoui_nxv32f16_nxv32i16(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfptoui_nxv32f16_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x i16> @llvm.experimental.constrained.fptoui.nxv32i16.nxv32f16(<vscale x 32 x half> %va, metadata !"fpexcept.strict")
+ ret <vscale x 32 x i16> %evec
+}
+
+declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i1> @vfptosi_nxv1f32_nxv1i1(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv1f32_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i1> %evec
+}
+
+declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i1> @vfptoui_nxv1f32_nxv1i1(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv1f32_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i1> %evec
+}
+
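+; f32 to i8 shrinks the element width by a factor of four: a narrowing convert
+; produces i16, and vnsrl.wi with shift amount 0 truncates that to i8.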
+declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv1f32_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i8> %evec
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv1f32_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i8> %evec
+}
+
+declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i16> @vfptosi_nxv1f32_nxv1i16(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv1f32_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i16> %evec
+}
+
+declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i16> @vfptoui_nxv1f32_nxv1i16(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv1f32_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i16> %evec
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i32> @vfptosi_nxv1f32_nxv1i32(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv1f32_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i32> %evec
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i32> @vfptoui_nxv1f32_nxv1i32(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv1f32_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i32> %evec
+}
+
+declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i64> @vfptosi_nxv1f32_nxv1i64(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv1f32_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i64> %evec
+}
+
+declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f32(<vscale x 1 x float>, metadata)
+define <vscale x 1 x i64> @vfptoui_nxv1f32_nxv1i64(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv1f32_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i64> %evec
+}
+
+declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i1> @vfptosi_nxv2f32_nxv2i1(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv2f32_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i1> %evec
+}
+
+declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i1> @vfptoui_nxv2f32_nxv2i1(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv2f32_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i1> %evec
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv2f32_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i8> %evec
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv2f32_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i8> %evec
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i16> @vfptosi_nxv2f32_nxv2i16(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv2f32_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i16> %evec
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i16> @vfptoui_nxv2f32_nxv2i16(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv2f32_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i16> %evec
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i32> @vfptosi_nxv2f32_nxv2i32(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv2f32_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i32> %evec
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i32> @vfptoui_nxv2f32_nxv2i32(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv2f32_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i32> %evec
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i64> @vfptosi_nxv2f32_nxv2i64(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv2f32_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i64> %evec
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float>, metadata)
+define <vscale x 2 x i64> @vfptoui_nxv2f32_nxv2i64(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv2f32_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i64> %evec
+}
+
+declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i1> @vfptosi_nxv4f32_nxv4i1(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv4f32_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i1> %evec
+}
+
+declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i1> @vfptoui_nxv4f32_nxv4i1(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv4f32_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i1> %evec
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv4f32_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i8> %evec
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv4f32_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i8> %evec
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i16> @vfptosi_nxv4f32_nxv4i16(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv4f32_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i16> %evec
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i16> @vfptoui_nxv4f32_nxv4i16(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv4f32_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i16> %evec
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i32> @vfptosi_nxv4f32_nxv4i32(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv4f32_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i32> %evec
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i32> @vfptoui_nxv4f32_nxv4i32(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv4f32_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i32> %evec
+}
+
+declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i64> @vfptosi_nxv4f32_nxv4i64(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv4f32_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i64> %evec
+}
+
+declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f32(<vscale x 4 x float>, metadata)
+define <vscale x 4 x i64> @vfptoui_nxv4f32_nxv4i64(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv4f32_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f32(<vscale x 4 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i64> %evec
+}
+
+declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i1> @vfptosi_nxv8f32_nxv8i1(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv8f32_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i1> %evec
+}
+
+declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i1> @vfptoui_nxv8f32_nxv8i1(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv8f32_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i1> %evec
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv8f32_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i8> %evec
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv8f32_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i8> %evec
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i16> @vfptosi_nxv8f32_nxv8i16(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv8f32_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i16> %evec
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i16> @vfptoui_nxv8f32_nxv8i16(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv8f32_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i16> %evec
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i32> @vfptosi_nxv8f32_nxv8i32(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv8f32_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i32> %evec
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i32> @vfptoui_nxv8f32_nxv8i32(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv8f32_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i32> %evec
+}
+
+declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i64> @vfptosi_nxv8f32_nxv8i64(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv8f32_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i64> %evec
+}
+
+declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f32(<vscale x 8 x float>, metadata)
+define <vscale x 8 x i64> @vfptoui_nxv8f32_nxv8i64(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv8f32_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f32(<vscale x 8 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i64> %evec
+}
+
+declare <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i1> @vfptosi_nxv16f32_nxv16i1(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv16f32_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i1> %evec
+}
+
+declare <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i1> @vfptoui_nxv16f32_nxv16i1(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv16f32_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i1> %evec
+}
+
+declare <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv16f32_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i8> %evec
+}
+
+declare <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv16f32_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i8> %evec
+}
+
+declare <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i16> @vfptosi_nxv16f32_nxv16i16(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv16f32_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i16> %evec
+}
+
+declare <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i16> @vfptoui_nxv16f32_nxv16i16(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv16f32_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i16> %evec
+}
+
+declare <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i32> @vfptosi_nxv16f32_nxv16i32(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptosi_nxv16f32_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i32> %evec
+}
+
+declare <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f32(<vscale x 16 x float>, metadata)
+define <vscale x 16 x i32> @vfptoui_nxv16f32_nxv16i32(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfptoui_nxv16f32_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f32(<vscale x 16 x float> %va, metadata !"fpexcept.strict")
+ ret <vscale x 16 x i32> %evec
+}
+
+declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i1> @vfptosi_nxv1f64_nxv1i1(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv1f64_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i1> %evec
+}
+
+declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i1> @vfptoui_nxv1f64_nxv1i1(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv1f64_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i1> %evec
+}
+
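+; From f64 down to i8 takes three steps: vfncvt.rtz.*.f.w to i32, followed
+; by two truncating vnsrl.wi shifts through i16 to i8.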
+declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv1f64_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i8> %evec
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv1f64_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i8> %evec
+}
+
+declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv1f64_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i16> %evec
+}
+
+declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv1f64_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i16> %evec
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i32> @vfptosi_nxv1f64_nxv1i32(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv1f64_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i32> %evec
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i32> @vfptoui_nxv1f64_nxv1i32(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv1f64_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i32> %evec
+}
+
+declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i64> @vfptosi_nxv1f64_nxv1i64(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv1f64_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i64> %evec
+}
+
+declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f64(<vscale x 1 x double>, metadata)
+define <vscale x 1 x i64> @vfptoui_nxv1f64_nxv1i64(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv1f64_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f64(<vscale x 1 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 1 x i64> %evec
+}
+
+declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i1> @vfptosi_nxv2f64_nxv2i1(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv2f64_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i1> %evec
+}
+
+declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i1> @vfptoui_nxv2f64_nxv2i1(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv2f64_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i1> %evec
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv2f64_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i8> %evec
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv2f64_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i8> %evec
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv2f64_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i16> %evec
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv2f64_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i16> %evec
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i32> @vfptosi_nxv2f64_nxv2i32(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv2f64_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i32> %evec
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i32> @vfptoui_nxv2f64_nxv2i32(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv2f64_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i32> %evec
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i64> @vfptosi_nxv2f64_nxv2i64(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv2f64_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i64> %evec
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double>, metadata)
+define <vscale x 2 x i64> @vfptoui_nxv2f64_nxv2i64(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv2f64_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 2 x i64> %evec
+}
+
+declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i1> @vfptosi_nxv4f64_nxv4i1(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv4f64_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i1> %evec
+}
+
+declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i1> @vfptoui_nxv4f64_nxv4i1(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv4f64_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i1> %evec
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv4f64_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i8> %evec
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv4f64_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i8> %evec
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv4f64_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i16> %evec
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv4f64_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i16> %evec
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i32> @vfptosi_nxv4f64_nxv4i32(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv4f64_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i32> %evec
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i32> @vfptoui_nxv4f64_nxv4i32(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv4f64_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i32> %evec
+}
+
+declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i64> @vfptosi_nxv4f64_nxv4i64(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv4f64_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i64> %evec
+}
+
+declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f64(<vscale x 4 x double>, metadata)
+define <vscale x 4 x i64> @vfptoui_nxv4f64_nxv4i64(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv4f64_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f64(<vscale x 4 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 4 x i64> %evec
+}
+
+declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i1> @vfptosi_nxv8f64_nxv8i1(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv8f64_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i1> %evec
+}
+
+declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i1> @vfptoui_nxv8f64_nxv8i1(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv8f64_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i1> %evec
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv8f64_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v16, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i8> %evec
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv8f64_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v10, v16, 0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i8> %evec
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv8f64_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i16> %evec
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv8f64_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i16> %evec
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i32> @vfptosi_nxv8f64_nxv8i32(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv8f64_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i32> %evec
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i32> @vfptoui_nxv8f64_nxv8i32(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv8f64_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i32> %evec
+}
+
+declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i64> @vfptosi_nxv8f64_nxv8i64(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptosi_nxv8f64_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i64> %evec
+}
+
+declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f64(<vscale x 8 x double>, metadata)
+define <vscale x 8 x i64> @vfptoui_nxv8f64_nxv8i64(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfptoui_nxv8f64_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f64(<vscale x 8 x double> %va, metadata !"fpexcept.strict")
+ ret <vscale x 8 x i64> %evec
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll
new file mode 100644
index 0000000000000..dbb712da54b8d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll
@@ -0,0 +1,1744 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
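+; i1 sources have no direct conversion. The expansion below materializes
+; 0/-1 (sitofp) or 0/1 (uitofp) with vmv.v.i + vmerge.vim under the mask,
+; then applies the widening integer-to-FP convert.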
+declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
+define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
+define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
+define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
+define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
+define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
+define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(<vscale x 1 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
+define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
+define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
+define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
+define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
+define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
+define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(<vscale x 2 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
+define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
+define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
+define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
+define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv4i1_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
+define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
+define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(<vscale x 4 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
+define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
+define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
+define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv8i1_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
+define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
+define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
+define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(<vscale x 8 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
+define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
+define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
+define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
+define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(<vscale x 16 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
+define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(<vscale x 32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 32 x half> %evec
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
+define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(<vscale x 32 x i1> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 32 x half> %evec
+}
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
+define <vscale x 1 x half> @vsitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv1i8_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
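+; i7 is not a legal element type. It is promoted to i8, with the value
+; sign-extended in-register for sitofp (vadd.vv doubles, vsra.vi shifts
+; back) and masked with 127 for uitofp.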
+declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
+define <vscale x 1 x half> @vsitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) {
+; CHECK-LABEL: vsitofp_nxv1i7_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsra.vi v9, v8, 1
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(<vscale x 1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
+define <vscale x 1 x half> @vuitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) {
+; CHECK-LABEL: vuitofp_nxv1i7_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 127
+; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(<vscale x 1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
+define <vscale x 1 x half> @vuitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv1i8_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
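+; Converting i8 to f32 crosses two element widths, so an integer extend
+; (vsext.vf2 / vzext.vf2) to i16 precedes the widening FP convert.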
+declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
+define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
+define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
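+; For i8 to f64 the extend widens by four (vsext.vf4 / vzext.vf4) to i32
+; before the final widening convert.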
+declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
+define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
+define <vscale x 1 x double> @vuitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(<vscale x 1 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
+define <vscale x 2 x half> @vsitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv2i8_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
+define <vscale x 2 x half> @vuitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv2i8_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
+define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
+define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
+define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
+define <vscale x 2 x double> @vuitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
+define <vscale x 4 x half> @vsitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv4i8_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
+define <vscale x 4 x half> @vuitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv4i8_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
+define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
+define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
+define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
+define <vscale x 4 x double> @vuitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(<vscale x 4 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
+define <vscale x 8 x half> @vsitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv8i8_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
+define <vscale x 8 x half> @vuitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv8i8_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
+define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
+define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
+define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
+define <vscale x 8 x double> @vuitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v16, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(<vscale x 8 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
+define <vscale x 16 x half> @vsitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv16i8_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
+define <vscale x 16 x half> @vuitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv16i8_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
+define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
+define <vscale x 16 x float> @vuitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v16, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(<vscale x 16 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
+define <vscale x 32 x half> @vsitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv32i8_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(<vscale x 32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 32 x half> %evec
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
+define <vscale x 32 x half> @vuitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv32i8_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(<vscale x 32 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 32 x half> %evec
+}
+
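+; i16 sources: same-width i16->f16 uses vfcvt.f.x(u).v directly, i16->f32 is a
+; single widening convert, and i16->f64 pre-extends to i32 with
+; vsext.vf2/vzext.vf2 first.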
+declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
+define <vscale x 1 x half> @vsitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv1i16_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
+define <vscale x 1 x half> @vuitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv1i16_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
+define <vscale x 1 x float> @vsitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv1i16_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
+define <vscale x 1 x float> @vuitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv1i16_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
+define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
+define <vscale x 1 x double> @vuitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv1i16_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(<vscale x 1 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
+define <vscale x 2 x half> @vsitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv2i16_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
+define <vscale x 2 x half> @vuitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv2i16_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
+define <vscale x 2 x float> @vsitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv2i16_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
+define <vscale x 2 x float> @vuitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv2i16_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
+define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
+define <vscale x 2 x double> @vuitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
+define <vscale x 4 x half> @vsitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv4i16_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
+define <vscale x 4 x half> @vuitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv4i16_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
+define <vscale x 4 x float> @vsitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv4i16_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
+define <vscale x 4 x float> @vuitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv4i16_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
+define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
+define <vscale x 4 x double> @vuitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv4i16_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(<vscale x 4 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
+define <vscale x 8 x half> @vsitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv8i16_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
+define <vscale x 8 x half> @vuitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv8i16_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
+define <vscale x 8 x float> @vsitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv8i16_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
+define <vscale x 8 x float> @vuitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv8i16_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
+define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vfwcvt.f.x.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
+define <vscale x 8 x double> @vuitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v16, v8
+; CHECK-NEXT: vfwcvt.f.xu.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(<vscale x 8 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
+define <vscale x 16 x half> @vsitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv16i16_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
+define <vscale x 16 x half> @vuitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv16i16_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
+define <vscale x 16 x float> @vsitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv16i16_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
+define <vscale x 16 x float> @vuitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv16i16_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(<vscale x 16 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
+define <vscale x 32 x half> @vsitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv32i16_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(<vscale x 32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 32 x half> %evec
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
+define <vscale x 32 x half> @vuitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv32i16_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(<vscale x 32 x i16> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 32 x half> %evec
+}
+
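+; i32 sources: i32->f16 narrows with vfncvt.f.x(u).w, i32->f32 is the
+; same-width vfcvt, and i32->f64 is a single widening convert.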
+declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
+define <vscale x 1 x half> @vsitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv1i32_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
+define <vscale x 1 x half> @vuitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv1i32_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
+define <vscale x 1 x float> @vsitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv1i32_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
+define <vscale x 1 x float> @vuitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv1i32_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
+define <vscale x 1 x double> @vsitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv1i32_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
+define <vscale x 1 x double> @vuitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv1i32_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(<vscale x 1 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
+define <vscale x 2 x half> @vsitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv2i32_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
+define <vscale x 2 x half> @vuitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv2i32_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
+define <vscale x 2 x float> @vsitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv2i32_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
+define <vscale x 2 x float> @vuitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv2i32_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
+define <vscale x 2 x double> @vsitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv2i32_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
+define <vscale x 2 x double> @vuitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv2i32_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
+define <vscale x 4 x half> @vsitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv4i32_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
+define <vscale x 4 x half> @vuitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv4i32_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
+define <vscale x 4 x float> @vsitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv4i32_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
+define <vscale x 4 x float> @vuitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv4i32_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
+define <vscale x 4 x double> @vsitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv4i32_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
+define <vscale x 4 x double> @vuitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv4i32_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(<vscale x 4 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
+define <vscale x 8 x half> @vsitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv8i32_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
+define <vscale x 8 x half> @vuitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv8i32_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
+define <vscale x 8 x float> @vsitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv8i32_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
+define <vscale x 8 x float> @vuitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv8i32_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
+define <vscale x 8 x double> @vsitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv8i32_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
+define <vscale x 8 x double> @vuitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv8i32_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfwcvt.f.xu.v v16, v8
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(<vscale x 8 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
+define <vscale x 16 x half> @vsitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv16i32_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
+define <vscale x 16 x half> @vuitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv16i32_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x half> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
+define <vscale x 16 x float> @vsitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv16i32_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
+define <vscale x 16 x float> @vuitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv16i32_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(<vscale x 16 x i32> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 16 x float> %evec
+}
+
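+; i64 sources: there is no single-step i64->f16 conversion, so the result is
+; narrowed twice (vfncvt.f.x(u).w to f32, then vfncvt.f.f.w to f16);
+; i64->f32 is a single narrowing convert and i64->f64 is same-width.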
+declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
+define <vscale x 1 x half> @vsitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv1i64_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
+define <vscale x 1 x half> @vuitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv1i64_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x half> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
+define <vscale x 1 x float> @vsitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv1i64_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
+define <vscale x 1 x float> @vuitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv1i64_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v9, v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x float> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
+define <vscale x 1 x double> @vsitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv1i64_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
+define <vscale x 1 x double> @vuitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv1i64_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(<vscale x 1 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 1 x double> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
+define <vscale x 2 x half> @vsitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv2i64_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
+define <vscale x 2 x half> @vuitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv2i64_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x half> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
+define <vscale x 2 x float> @vsitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv2i64_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
+define <vscale x 2 x float> @vuitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv2i64_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v10, v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x float> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
+define <vscale x 2 x double> @vsitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv2i64_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
+define <vscale x 2 x double> @vuitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv2i64_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 2 x double> %evec
+}
+
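+; The nxv4i64 and nxv8i64 tests below exercise the same three lowering patterns
+; at larger register groups (m4 and m8 sources).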
+declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
+define <vscale x 4 x half> @vsitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv4i64_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
+define <vscale x 4 x half> @vuitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv4i64_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v12, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x half> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
+define <vscale x 4 x float> @vsitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv4i64_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
+define <vscale x 4 x float> @vuitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv4i64_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v12, v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x float> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
+define <vscale x 4 x double> @vsitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv4i64_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
+define <vscale x 4 x double> @vuitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv4i64_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(<vscale x 4 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 4 x double> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
+define <vscale x 8 x half> @vsitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv8i64_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
+define <vscale x 8 x half> @vuitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv8i64_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v16, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x half> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
+define <vscale x 8 x float> @vsitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv8i64_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
+define <vscale x 8 x float> @vuitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv8i64_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfncvt.f.xu.w v16, v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x float> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
+define <vscale x 8 x double> @vsitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv8i64_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
+define <vscale x 8 x double> @vuitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv8i64_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.f.xu.v v8, v8
+; CHECK-NEXT: ret
+ %evec = call <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(<vscale x 8 x i64> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <vscale x 8 x double> %evec
+}