[llvm] 9637e95 - [RISCV] Support ISD::STRICT_FADD/FSUB/FMUL/FDIV for vector types.

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 14 16:47:25 PDT 2023


Author: Yeting Kuo
Date: 2023-03-15T07:47:16+08:00
New Revision: 9637e950cb3a623baab2156c2cb97a930fbaeec4

URL: https://github.com/llvm/llvm-project/commit/9637e950cb3a623baab2156c2cb97a930fbaeec4
DIFF: https://github.com/llvm/llvm-project/commit/9637e950cb3a623baab2156c2cb97a930fbaeec4.diff

LOG: [RISCV] Support ISD::STRICT_FADD/FSUB/FMUL/FDIV for vector types.

The patch handles fixed type strict-fp by new RISCVISD::STRICT_ prefixed
isd nodes.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D145900

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c3c63ff45e9b9..7846feebfe2b9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -805,6 +805,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(FloatingPointVPOps, VT, Custom);
 
       setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
+      setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
+                          ISD::STRICT_FDIV},
+                         VT, Legal);
     };
 
     // Sets common extload/truncstore actions on RVV floating-point vector
@@ -1019,6 +1022,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(FloatingPointVPOps, VT, Custom);
 
         setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
+        setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB,
+                            ISD::STRICT_FMUL, ISD::STRICT_FDIV},
+                           VT, Custom);
       }
 
       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
@@ -4430,6 +4436,18 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
   case ISD::FCOPYSIGN:
     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
+  case ISD::STRICT_FADD:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FADD_VL,
+                             /*HasMergeOp*/ true);
+  case ISD::STRICT_FSUB:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FSUB_VL,
+                             /*HasMergeOp*/ true);
+  case ISD::STRICT_FMUL:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FMUL_VL,
+                             /*HasMergeOp*/ true);
+  case ISD::STRICT_FDIV:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FDIV_VL,
+                             /*HasMergeOp*/ true);
   case ISD::MGATHER:
   case ISD::VP_GATHER:
     return lowerMaskedGather(Op, DAG);
@@ -7345,6 +7363,16 @@ SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
     Ops.push_back(Mask);
   Ops.push_back(VL);
 
+  // StrictFP operations have two result values. Their lowered result should
+  // have same result count.
+  if (Op->isStrictFPOpcode()) {
+    SDValue ScalableRes =
+        DAG.getNode(NewOpc, DL, DAG.getVTList(ContainerVT, MVT::Other), Ops,
+                    Op->getFlags());
+    SDValue SubVec = convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
+    return DAG.getMergeValues({SubVec, ScalableRes.getValue(1)}, DL);
+  }
+
   SDValue ScalableRes =
       DAG.getNode(NewOpc, DL, ContainerVT, Ops, Op->getFlags());
   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
@@ -13995,8 +14023,12 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(VFCVT_RM_F_XU_VL)
   NODE_NAME_CASE(VFCVT_RM_F_X_VL)
   NODE_NAME_CASE(FP_EXTEND_VL)
-  NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
   NODE_NAME_CASE(FP_ROUND_VL)
+  NODE_NAME_CASE(STRICT_FADD_VL)
+  NODE_NAME_CASE(STRICT_FSUB_VL)
+  NODE_NAME_CASE(STRICT_FMUL_VL)
+  NODE_NAME_CASE(STRICT_FDIV_VL)
+  NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
   NODE_NAME_CASE(VWMUL_VL)
   NODE_NAME_CASE(VWMULU_VL)
   NODE_NAME_CASE(VWMULSU_VL)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 5f96e28383108..b3a202476751d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -330,6 +330,10 @@ enum NodeType : unsigned {
   // result being sign extended to 64 bit. These saturate out of range inputs.
   STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
   STRICT_FCVT_WU_RV64,
+  STRICT_FADD_VL,
+  STRICT_FSUB_VL,
+  STRICT_FMUL_VL,
+  STRICT_FDIV_VL,
   STRICT_FP_EXTEND_VL,
 
   // WARNING: Do not add anything in the end unless you want the node to

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 4af99d4669469..23fb5364ab165 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -72,7 +72,7 @@ multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
             (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
 }
 
-class VPatBinarySDNode_VV<SDNode vop,
+class VPatBinarySDNode_VV<SDPatternOperator vop,
                           string instruction_name,
                           ValueType result_type,
                           ValueType op_type,
@@ -88,7 +88,7 @@ class VPatBinarySDNode_VV<SDNode vop,
                      op_reg_class:$rs2,
                      avl, sew)>;
 
-class VPatBinarySDNode_XI<SDNode vop,
+class VPatBinarySDNode_XI<SDPatternOperator vop,
                           string instruction_name,
                           string suffix,
                           ValueType result_type,
@@ -107,7 +107,7 @@ class VPatBinarySDNode_XI<SDNode vop,
                      xop_kind:$rs2,
                      avl, sew)>;
 
-multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name> {
+multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name> {
   foreach vti = AllIntegerVectors in {
     def : VPatBinarySDNode_VV<vop, instruction_name,
                               vti.Vector, vti.Vector, vti.Log2SEW,
@@ -119,7 +119,7 @@ multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name> {
   }
 }
 
-multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
+multiclass VPatBinarySDNode_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                      Operand ImmType = simm5>
     : VPatBinarySDNode_VV_VX<vop, instruction_name> {
   foreach vti = AllIntegerVectors in {
@@ -131,7 +131,7 @@ multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
   }
 }
 
-class VPatBinarySDNode_VF<SDNode vop,
+class VPatBinarySDNode_VF<SDPatternOperator vop,
                           string instruction_name,
                           ValueType result_type,
                           ValueType vop_type,
@@ -148,7 +148,7 @@ class VPatBinarySDNode_VF<SDNode vop,
                      (xop_type xop_kind:$rs2),
                      avl, sew)>;
 
-multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
+multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name> {
   foreach vti = AllFloatVectors in {
     def : VPatBinarySDNode_VV<vop, instruction_name,
                               vti.Vector, vti.Vector, vti.Log2SEW,
@@ -160,7 +160,7 @@ multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
   }
 }
 
-multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> {
+multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_name> {
   foreach fvti = AllFloatVectors in
     def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                 (fvti.Vector fvti.RegClass:$rs1))),
@@ -826,18 +826,18 @@ foreach mti = AllMasks in {
 let Predicates = [HasVInstructionsAnyF] in {
 
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">;
-defm : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">;
-defm : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">;
+defm : VPatBinaryFPSDNode_VV_VF<any_fadd, "PseudoVFADD">;
+defm : VPatBinaryFPSDNode_VV_VF<any_fsub, "PseudoVFSUB">;
+defm : VPatBinaryFPSDNode_R_VF<any_fsub, "PseudoVFRSUB">;
 
 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
 defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fadd, "PseudoVFWADD">;
 defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fsub, "PseudoVFWSUB">;
 
 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
-defm : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
-defm : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
-defm : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;
+defm : VPatBinaryFPSDNode_VV_VF<any_fmul, "PseudoVFMUL">;
+defm : VPatBinaryFPSDNode_VV_VF<any_fdiv, "PseudoVFDIV">;
+defm : VPatBinaryFPSDNode_R_VF<any_fdiv, "PseudoVFRDIV">;
 
 // 13.5. Vector Widening Floating-Point Multiply Instructions
 defm : VPatWidenBinaryFPSDNode_VV_VF<fmul, "PseudoVFWMUL">;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 0e4024111eb84..5087628749cb3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -107,6 +107,24 @@ def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>;
 def riscv_fminnum_vl   : SDNode<"RISCVISD::FMINNUM_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
 def riscv_fmaxnum_vl   : SDNode<"RISCVISD::FMAXNUM_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
 
+def riscv_strict_fadd_vl  : SDNode<"RISCVISD::STRICT_FADD_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
+def riscv_strict_fsub_vl  : SDNode<"RISCVISD::STRICT_FSUB_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
+def riscv_strict_fmul_vl  : SDNode<"RISCVISD::STRICT_FMUL_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
+def riscv_strict_fdiv_vl  : SDNode<"RISCVISD::STRICT_FDIV_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
+
+def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                        [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                         (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                        [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                         (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                        [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                         (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                        [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                         (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+
 def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
@@ -421,7 +439,7 @@ def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
 def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
 def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;
 
-multiclass VPatBinaryVL_V<SDNode vop,
+multiclass VPatBinaryVL_V<SDPatternOperator vop,
                           string instruction_name,
                           string suffix,
                           ValueType result_type,
@@ -480,7 +498,7 @@ multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
                      GPR:$vl, sew, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
 }
 
-multiclass VPatBinaryVL_XI<SDNode vop,
+multiclass VPatBinaryVL_XI<SDPatternOperator vop,
                            string instruction_name,
                            string suffix,
                            ValueType result_type,
@@ -506,7 +524,7 @@ multiclass VPatBinaryVL_XI<SDNode vop,
                      (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
 }
 
-multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
+multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name> {
   foreach vti = AllIntegerVectors in {
     defm : VPatBinaryVL_V<vop, instruction_name, "VV",
                            vti.Vector, vti.Vector, vti.Vector, vti.Mask,
@@ -519,7 +537,7 @@ multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
   }
 }
 
-multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
+multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                  Operand ImmType = simm5>
     : VPatBinaryVL_VV_VX<vop, instruction_name> {
   foreach vti = AllIntegerVectors in {
@@ -531,7 +549,7 @@ multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
   }
 }
 
-multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> {
+multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> {
   foreach VtiToWti = AllWidenableIntVectors in {
     defvar vti = VtiToWti.Vti;
     defvar wti = VtiToWti.Wti;
@@ -545,7 +563,7 @@ multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> {
                            SplatPat, GPR>;
   }
 }
-multiclass VPatBinaryWVL_VV_VX_WV_WX<SDNode vop, SDNode vop_w,
+multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w,
                                      string instruction_name>
     : VPatBinaryWVL_VV_VX<vop, instruction_name> {
   foreach VtiToWti = AllWidenableIntVectors in {
@@ -565,7 +583,7 @@ multiclass VPatBinaryWVL_VV_VX_WV_WX<SDNode vop, SDNode vop_w,
   }
 }
 
-multiclass VPatBinaryNVL_WV_WX_WI<SDNode vop, string instruction_name> {
+multiclass VPatBinaryNVL_WV_WX_WI<SDPatternOperator vop, string instruction_name> {
   foreach VtiToWti = AllWidenableIntVectors in {
     defvar vti = VtiToWti.Vti;
     defvar wti = VtiToWti.Wti;
@@ -585,7 +603,7 @@ multiclass VPatBinaryNVL_WV_WX_WI<SDNode vop, string instruction_name> {
   }
 }
 
-multiclass VPatBinaryVL_VF<SDNode vop,
+multiclass VPatBinaryVL_VF<SDPatternOperator vop,
                            string instruction_name,
                            ValueType result_type,
                            ValueType vop_type,
@@ -607,7 +625,7 @@ multiclass VPatBinaryVL_VF<SDNode vop,
                      (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
 }
 
-multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
+multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name> {
   foreach vti = AllFloatVectors in {
     defm : VPatBinaryVL_V<vop, instruction_name, "VV",
                           vti.Vector, vti.Vector, vti.Vector, vti.Mask,
@@ -620,7 +638,7 @@ multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
   }
 }
 
-multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> {
+multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name> {
   foreach fvti = AllFloatVectors in {
     def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
                                 fvti.RegClass:$rs1,
@@ -1612,18 +1630,18 @@ defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
 let Predicates = [HasVInstructionsAnyF] in {
 
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm : VPatBinaryFPVL_VV_VF<riscv_fadd_vl, "PseudoVFADD">;
-defm : VPatBinaryFPVL_VV_VF<riscv_fsub_vl, "PseudoVFSUB">;
-defm : VPatBinaryFPVL_R_VF<riscv_fsub_vl, "PseudoVFRSUB">;
+defm : VPatBinaryFPVL_VV_VF<any_riscv_fadd_vl, "PseudoVFADD">;
+defm : VPatBinaryFPVL_VV_VF<any_riscv_fsub_vl, "PseudoVFSUB">;
+defm : VPatBinaryFPVL_R_VF<any_riscv_fsub_vl, "PseudoVFRSUB">;
 
 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
 defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fadd_vl, "PseudoVFWADD">;
 defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fsub_vl, "PseudoVFWSUB">;
 
 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
-defm : VPatBinaryFPVL_VV_VF<riscv_fmul_vl, "PseudoVFMUL">;
-defm : VPatBinaryFPVL_VV_VF<riscv_fdiv_vl, "PseudoVFDIV">;
-defm : VPatBinaryFPVL_R_VF<riscv_fdiv_vl, "PseudoVFRDIV">;
+defm : VPatBinaryFPVL_VV_VF<any_riscv_fmul_vl, "PseudoVFMUL">;
+defm : VPatBinaryFPVL_VV_VF<any_riscv_fdiv_vl, "PseudoVFDIV">;
+defm : VPatBinaryFPVL_R_VF<any_riscv_fdiv_vl, "PseudoVFRDIV">;
 
 // 13.5. Vector Widening Floating-Point Multiply Instructions
 defm : VPatWidenBinaryFPVL_VV_VF<riscv_fmul_vl, riscv_fpextend_vl_oneuse, "PseudoVFWMUL">;

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll
new file mode 100644
index 0000000000000..f7b51e8a7aea2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll
@@ -0,0 +1,295 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128\
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128\
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half>, <2 x half>, metadata, metadata)
+define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_v2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x half> poison, half %b, i32 0
+  %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+declare <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half>, <4 x half>, metadata, metadata)
+define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_v4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x half> poison, half %b, i32 0
+  %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+declare <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half>, <8 x half>, metadata, metadata)
+define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_v8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata)
+define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_v16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x half> poison, half %b, i32 0
+  %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+declare <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half>, <32 x half>, metadata, metadata)
+define <32 x half> @vfadd_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_v32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+define <32 x half> @vfadd_vf_v32f16(<32 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <32 x half> poison, half %b, i32 0
+  %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
+  %vc = call <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata)
+define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_v2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x float> poison, float %b, i32 0
+  %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x float> poison, float %b, i32 0
+  %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_v16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x float> poison, float %b, i32 0
+  %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x double> poison, double %b, i32 0
+  %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x double> poison, double %b, i32 0
+  %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_v8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}
+
+define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll
new file mode 100644
index 0000000000000..900386b621ec5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll
@@ -0,0 +1,331 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half>, <2 x half>, metadata, metadata)
+define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_v2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x half> poison, half %b, i32 0
+  %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+declare <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half>, <4 x half>, metadata, metadata)
+define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_v4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x half> poison, half %b, i32 0
+  %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+declare <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half>, <8 x half>, metadata, metadata)
+define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_v8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+define <8 x half> @vfdiv_fv_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_fv_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half> %splat, <8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata)
+define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_v16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x half> poison, half %b, i32 0
+  %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+declare <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half>, <32 x half>, metadata, metadata)
+define <32 x half> @vfdiv_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_v32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+define <32 x half> @vfdiv_vf_v32f16(<32 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <32 x half> poison, half %b, i32 0
+  %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
+  %vc = call <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+declare <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float>, <2 x float>, metadata, metadata)
+define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_v2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x float> poison, float %b, i32 0
+  %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x float> poison, float %b, i32 0
+  %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+define <8 x float> @vfdiv_fv_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_fv_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %splat, <8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_v16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x float> poison, float %b, i32 0
+  %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x double> poison, double %b, i32 0
+  %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x double> poison, double %b, i32 0
+  %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_v8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}
+
+define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}
+
+define <8 x double> @vfdiv_fv_v8f64(<8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_fv_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %splat, <8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll
new file mode 100644
index 0000000000000..13ae8831391ae
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll
@@ -0,0 +1,367 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half>, <1 x half>, metadata, metadata)
+define <1 x half> @vfmul_vv_v1f16(<1 x half> %va, <1 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_v1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half> %va, <1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <1 x half> %vc
+}
+
+define <1 x half> @vfmul_vf_v1f16(<1 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_v1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <1 x half> poison, half %b, i32 0
+  %splat = shufflevector <1 x half> %head, <1 x half> poison, <1 x i32> zeroinitializer
+  %vc = call <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half> %va, <1 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <1 x half> %vc
+}
+
+declare <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half>, <2 x half>, metadata, metadata)
+define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_v2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x half> poison, half %b, i32 0
+  %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+declare <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half>, <4 x half>, metadata, metadata)
+define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_v4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x half> poison, half %b, i32 0
+  %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+declare <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half>, <8 x half>, metadata, metadata)
+define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_v8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata)
+define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_v16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x half> poison, half %b, i32 0
+  %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+declare <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half>, <32 x half>, metadata, metadata)
+define <32 x half> @vfmul_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_v32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+define <32 x half> @vfmul_vf_v32f16(<32 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <32 x half> poison, half %b, i32 0
+  %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
+  %vc = call <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata)
+define <1 x float> @vfmul_vv_v1f32(<1 x float> %va, <1 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float> %va, <1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <1 x float> %vc
+}
+
+define <1 x float> @vfmul_vf_v1f32(<1 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_v1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <1 x float> poison, float %b, i32 0
+  %splat = shufflevector <1 x float> %head, <1 x float> poison, <1 x i32> zeroinitializer
+  %vc = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float> %va, <1 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <1 x float> %vc
+}
+
+declare <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float>, <2 x float>, metadata, metadata)
+define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_v2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x float> poison, float %b, i32 0
+  %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x float> poison, float %b, i32 0
+  %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_v16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x float> poison, float %b, i32 0
+  %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+define <1 x double> @vfmul_vv_v1f64(<1 x double> %va, <1 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %va, <1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <1 x double> %vc
+}
+
+define <1 x double> @vfmul_vf_v1f64(<1 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <1 x double> poison, double %b, i32 0
+  %splat = shufflevector <1 x double> %head, <1 x double> poison, <1 x i32> zeroinitializer
+  %vc = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %va, <1 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <1 x double> %vc
+}
+
+declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x double> poison, double %b, i32 0
+  %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x double> poison, double %b, i32 0
+  %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_v8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}
+
+define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
new file mode 100644
index 0000000000000..50e6e2dc51154
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
@@ -0,0 +1,331 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half>, <2 x half>, metadata, metadata)
+define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half> %va, <2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x half> poison, half %b, i32 0
+  %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half> %va, <2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x half> %vc
+}
+
+declare <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half>, <4 x half>, metadata, metadata)
+define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half> %va, <4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x half> poison, half %b, i32 0
+  %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half> %va, <4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x half> %vc
+}
+
+declare <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half>, <8 x half>, metadata, metadata)
+define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half> %va, <8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+define <8 x half> @vfsub_fv_v8f16(<8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_fv_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x half> poison, half %b, i32 0
+  %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half> %splat, <8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x half> %vc
+}
+
+declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, metadata)
+define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half> %va, <16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x half> poison, half %b, i32 0
+  %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half> %va, <16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x half> %vc
+}
+
+declare <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half>, <32 x half>, metadata, metadata)
+define <32 x half> @vfsub_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_v32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half> %va, <32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+define <32 x half> @vfsub_vf_v32f16(<32 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <32 x half> poison, half %b, i32 0
+  %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
+  %vc = call <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half> %va, <32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <32 x half> %vc
+}
+
+declare <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float>, metadata, metadata)
+define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %va, <2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x float> poison, float %b, i32 0
+  %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x float> %vc
+}
+
+declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %va, <4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x float> poison, float %b, i32 0
+  %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %va, <4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x float> %vc
+}
+
+declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %va, <8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %va, <8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+define <8 x float> @vfsub_fv_v8f32(<8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_fv_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x float> poison, float %b, i32 0
+  %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %splat, <8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x float> %vc
+}
+
+declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_v16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float> %va, <16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x float> poison, float %b, i32 0
+  %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
+  %vc = call <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float> %va, <16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <16 x float> %vc
+}
+
+declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x double> poison, double %b, i32 0
+  %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
+  %vc = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %va, <2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <2 x double> %vc
+}
+
+declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double> %va, <4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x double> poison, double %b, i32 0
+  %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
+  %vc = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double> %va, <4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <4 x double> %vc
+}
+
+declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_v8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}
+
+define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %va, <8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}
+
+define <8 x double> @vfsub_fv_v8f64(<8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_fv_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x double> poison, double %b, i32 0
+  %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
+  %vc = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %splat, <8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <8 x double> %vc
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll
new file mode 100644
index 0000000000000..313a58efe902b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll
@@ -0,0 +1,365 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.fadd.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
+define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.fadd.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
+define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.fadd.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
+define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.fadd.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
+define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.fadd.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
+define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.fadd.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
+define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.fadd.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
+define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.fadd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
+define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.fadd.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
+define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.fadd.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
+define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.fadd.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
+define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.fadd.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
+define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.fadd.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
+define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.fadd.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
+define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.fadd.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
+define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll
new file mode 100644
index 0000000000000..7c10ca993011d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll
@@ -0,0 +1,401 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.fdiv.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
+define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.fdiv.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
+define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.fdiv.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
+define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.fdiv.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
+define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fdiv.nxv8f16(<vscale x 8 x half> %splat, <vscale x 8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.fdiv.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
+define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.fdiv.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
+define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.fdiv.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
+define <vscale x 1 x float> @vfdiv_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.fdiv.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
+define <vscale x 2 x float> @vfdiv_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.fdiv.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
+define <vscale x 4 x float> @vfdiv_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.fdiv.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
+define <vscale x 8 x float> @vfdiv_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fdiv.nxv8f32(<vscale x 8 x float> %splat, <vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.fdiv.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
+define <vscale x 16 x float> @vfdiv_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.fdiv.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
+define <vscale x 1 x double> @vfdiv_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.fdiv.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
+define <vscale x 2 x double> @vfdiv_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.fdiv.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
+define <vscale x 4 x double> @vfdiv_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.fdiv.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
+define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfdiv.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fdiv.nxv8f64(<vscale x 8 x double> %splat, <vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll
new file mode 100644
index 0000000000000..1c8a34b4eff00
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll
@@ -0,0 +1,365 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.fmul.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
+define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.fmul.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
+define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.fmul.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
+define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.fmul.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
+define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.fmul.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
+define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.fmul.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
+define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.fmul.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
+define <vscale x 1 x float> @vfmul_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.fmul.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
+define <vscale x 2 x float> @vfmul_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.fmul.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
+define <vscale x 4 x float> @vfmul_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.fmul.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
+define <vscale x 8 x float> @vfmul_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.fmul.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
+define <vscale x 16 x float> @vfmul_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.fmul.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
+define <vscale x 1 x double> @vfmul_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.fmul.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
+define <vscale x 2 x double> @vfmul_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.fmul.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
+define <vscale x 4 x double> @vfmul_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.fmul.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
+define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmul.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmul_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll
new file mode 100644
index 0000000000000..f4e24a7b9442c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll
@@ -0,0 +1,401 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.fsub.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
+define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x half> @llvm.experimental.constrained.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x half> %vc
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.fsub.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
+define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x half> @llvm.experimental.constrained.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x half> %vc
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.fsub.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
+define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x half> @llvm.experimental.constrained.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x half> %vc
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.fsub.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
+define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x half> @llvm.experimental.constrained.fsub.nxv8f16(<vscale x 8 x half> %splat, <vscale x 8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x half> %vc
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.fsub.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
+define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x half> @llvm.experimental.constrained.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x half> %vc
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.fsub.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
+define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %vc = call <vscale x 32 x half> @llvm.experimental.constrained.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 32 x half> %vc
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
+define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x float> %vc
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.fsub.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
+define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x float> @llvm.experimental.constrained.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x float> %vc
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.fsub.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
+define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x float> @llvm.experimental.constrained.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x float> %vc
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
+define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float> %splat, <vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x float> %vc
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.fsub.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
+define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  %vc = call <vscale x 16 x float> @llvm.experimental.constrained.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 16 x float> %vc
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.fsub.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
+define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  %vc = call <vscale x 1 x double> @llvm.experimental.constrained.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 1 x double> %vc
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
+define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %vc = call <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 2 x double> %vc
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.fsub.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
+define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  %vc = call <vscale x 4 x double> @llvm.experimental.constrained.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 4 x double> %vc
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
+define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfsub.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %splat, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = call <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double> %splat, <vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret <vscale x 8 x double> %vc
+}


        


More information about the llvm-commits mailing list