[llvm-branch-commits] [llvm] 7989684 - [RISCV] Add scalable vector fadd/fsub/fmul/fdiv ISel patterns

Fraser Cormack via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Mon Jan 11 13:30:15 PST 2021


Author: Fraser Cormack
Date: 2021-01-11T21:19:48Z
New Revision: 7989684a2e4a496201ff41d31cede764487ca80f

URL: https://github.com/llvm/llvm-project/commit/7989684a2e4a496201ff41d31cede764487ca80f
DIFF: https://github.com/llvm/llvm-project/commit/7989684a2e4a496201ff41d31cede764487ca80f.diff

LOG: [RISCV] Add scalable vector fadd/fsub/fmul/fdiv ISel patterns

Original patch by @rogfer01.

This patch adds ISel patterns for the above operations to the
corresponding vector/vector and vector/scalar RVV instructions, as well
as extra patterns to match operand-swapped scalar/vector vfrsub and
vfrdiv.
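As a quick illustration of the operand-swapped case (a minimal sketch
mirroring the vfdiv_fv_* tests added below; the function name is purely
illustrative and not part of this patch), an fdiv whose splatted scalar
operand appears on the left-hand side is expected to select vfrdiv.vf
rather than vfdiv.vf:

  ; Hypothetical example, not one of the added tests.
  define <vscale x 2 x float> @frdiv_scalar_lhs(<vscale x 2 x float> %va, float %b) {
    ; Splat the scalar and use it as the *left* operand of the fdiv, so the
    ; new VPatBinaryFPSDNode_R_VF pattern should pick PseudoVFRDIV (vfrdiv.vf).
    %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
    %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
    %vc = fdiv <vscale x 2 x float> %splat, %va
    ret <vscale x 2 x float> %vc
  }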

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Fraser Cormack <fraser at codeplay.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D94408

Added: 
    llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv64.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 404c3050e601..a5ecd5f83f34 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -132,6 +132,46 @@ multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
   }
 }
 
+class VPatBinarySDNode_VF<SDNode vop,
+                          string instruction_name,
+                          ValueType result_type,
+                          ValueType vop_type,
+                          ValueType xop_type,
+                          ValueType mask_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg RetClass,
+                          VReg vop_reg_class,
+                          DAGOperand xop_kind> :
+    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
+                          (vop_type (splat_vector xop_kind:$rs2)))),
+        (!cast<Instruction>(instruction_name#"_VF_"#vlmul.MX)
+                     vop_reg_class:$rs1,
+                     ToFPR32<xop_type, xop_kind, "rs2">.ret,
+                     VLMax, sew)>;
+
+multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
+  foreach vti = AllFloatVectors in {
+    def : VPatBinarySDNode_VV<vop, instruction_name,
+                              vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+                              vti.LMul, vti.RegClass, vti.RegClass>;
+    def : VPatBinarySDNode_VF<vop, instruction_name,
+                              vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
+                              vti.SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                              vti.ScalarRegClass>;
+  }
+}
+
+multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> {
+  foreach fvti = AllFloatVectors in
+    def : Pat<(fvti.Vector (vop (fvti.Vector (splat_vector fvti.Scalar:$rs2)),
+                                (fvti.Vector fvti.RegClass:$rs1))),
+              (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
+                           fvti.RegClass:$rs1,
+                           ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+                           VLMax, fvti.SEW)>;
+}
+
 multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
                                      string instruction_name,
                                      bit swap = 0> {
@@ -340,6 +380,16 @@ foreach mti = AllMasks in {
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
 
+// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+defm "" : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">;
+defm "" : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">;
+defm "" : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">;
+
+// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
+defm "" : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
+defm "" : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
+defm "" : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;
+
 // 14.11. Vector Floating-Point Compare Instructions
 defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETEQ,  "PseudoVMFEQ", "PseudoVMFEQ">;
 defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv32.ll
new file mode 100644
index 000000000000..62779ea42e5a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv32.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fadd <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fadd <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fadd <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fadd <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fadd <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fadd <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fadd <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fadd <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fadd <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fadd <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fadd <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fadd <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv64.ll
new file mode 100644
index 000000000000..df7970ad1fcf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode-rv64.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fadd <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fadd <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fadd <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fadd <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfadd_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fadd <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fadd <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fadd <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fadd <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfadd_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfadd_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fadd <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fadd <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fadd <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fadd <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfadd_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fadd <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfadd_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfadd_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fadd <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv32.ll
new file mode 100644
index 000000000000..93177a88b297
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv32.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fdiv <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fdiv <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fdiv <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fdiv <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fdiv <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfdiv_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fdiv <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfdiv_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fdiv <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfdiv_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fdiv <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfdiv_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fdiv <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfdiv_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fdiv <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfdiv_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fdiv <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfdiv_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fdiv <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv64.ll
new file mode 100644
index 000000000000..04c536fa4efe
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode-rv64.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fdiv <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fdiv <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fdiv <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fdiv <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfdiv_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fdiv <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfdiv_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fdiv <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfdiv_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fdiv <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfdiv_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fdiv <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfdiv_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfdiv_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfdiv_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fdiv <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfdiv_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fdiv <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfdiv_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fdiv <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfdiv_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fdiv <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfdiv_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fdiv <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfdiv_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfdiv_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fdiv <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv32.ll
new file mode 100644
index 000000000000..9a29ece86a1a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv32.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fmul <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fmul <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fmul <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmul_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fmul <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fmul <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfmul_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fmul <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfmul_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fmul <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfmul_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fmul <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmul_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmul_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfmul_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fmul <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfmul_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fmul <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfmul_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fmul <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfmul_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fmul <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmul_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmul_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv64.ll
new file mode 100644
index 000000000000..d1c9ee75270e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode-rv64.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fmul <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fmul <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fmul <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfmul_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fmul <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfmul_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fmul <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfmul_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fmul <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfmul_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fmul <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfmul_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fmul <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmul_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfmul_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfmul_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfmul_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fmul <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfmul_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fmul <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfmul_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fmul <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfmul_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fmul <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfmul_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fmul <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmul_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfmul_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfmul_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fmul <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv32.ll
new file mode 100644
index 000000000000..795b92108c69
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv32.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fsub <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fsub <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fsub <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fsub <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fsub <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fsub <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fsub <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fsub <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fsub <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fsub <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fsub <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fsub <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv64.ll
new file mode 100644
index 000000000000..ccf885f78c4b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode-rv64.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 1 x half> %va, %vb
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fsub <vscale x 1 x half> %va, %splat
+  ret <vscale x 1 x half> %vc
+}
+
+define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 2 x half> %va, %vb
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fsub <vscale x 2 x half> %va, %splat
+  ret <vscale x 2 x half> %vc
+}
+
+define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 4 x half> %va, %vb
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fsub <vscale x 4 x half> %va, %splat
+  ret <vscale x 4 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x half> %vc
+}
+
+define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 16 x half> %va, %vb
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fsub <vscale x 16 x half> %va, %splat
+  ret <vscale x 16 x half> %vc
+}
+
+define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 32 x half> %va, %vb
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+; CHECK-LABEL: vfsub_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %vc = fsub <vscale x 32 x half> %va, %splat
+  ret <vscale x 32 x half> %vc
+}
+
+define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 1 x float> %va, %vb
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fsub <vscale x 1 x float> %va, %splat
+  ret <vscale x 1 x float> %vc
+}
+
+define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 2 x float> %va, %vb
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fsub <vscale x 2 x float> %va, %splat
+  ret <vscale x 2 x float> %vc
+}
+
+define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 4 x float> %va, %vb
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fsub <vscale x 4 x float> %va, %splat
+  ret <vscale x 4 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 8 x float> @vfsub_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x float> %vc
+}
+
+define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 16 x float> %va, %vb
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+; CHECK-LABEL: vfsub_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %vc = fsub <vscale x 16 x float> %va, %splat
+  ret <vscale x 16 x float> %vc
+}
+
+define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 1 x double> %va, %vb
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = fsub <vscale x 1 x double> %va, %splat
+  ret <vscale x 1 x double> %vc
+}
+
+define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 2 x double> %va, %vb
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = fsub <vscale x 2 x double> %va, %splat
+  ret <vscale x 2 x double> %vc
+}
+
+define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 4 x double> %va, %vb
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = fsub <vscale x 4 x double> %va, %splat
+  ret <vscale x 4 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: vfsub_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fsub <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x double> %vc
+}
+
+define <vscale x 8 x double> @vfsub_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: vfsub_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fsub <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x double> %vc
+}
+

More information about the llvm-branch-commits mailing list