[llvm-branch-commits] [llvm] 350c055 - [NFC][RISCV] Add double type in RISC-V V CodeGen test cases for RV32.

Hsiangkai Wang via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Wed Jan 13 07:51:02 PST 2021


Author: Hsiangkai Wang
Date: 2021-01-13T23:45:13+08:00
New Revision: 350c0552c66bf0ca6907b6aa8cede425dedde516

URL: https://github.com/llvm/llvm-project/commit/350c0552c66bf0ca6907b6aa8cede425dedde516
DIFF: https://github.com/llvm/llvm-project/commit/350c0552c66bf0ca6907b6aa8cede425dedde516.diff

LOG: [NFC][RISCV] Add double type in RISC-V V CodeGen test cases for RV32.

Differential Revision: https://reviews.llvm.org/D94584
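
The RUN lines in the modified tests switch from +f to +d so the new
double-precision (f64) vector cases can be selected, and the CHECK lines
are regenerated by utils/update_llc_test_checks.py, as noted in each test
header. A minimal sketch of that regeneration step, assuming a built llc
at build/bin/llc (the build path and invocation below are illustrative,
not taken from this commit):

    # Regenerate the autogenerated CHECK lines for one of the tests.
    python llvm/utils/update_llc_test_checks.py \
      --llc-binary build/bin/llc \
      llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll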

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
index 24b6d73d64c3..c8a0488e9056 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
@@ -648,3 +648,93 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vcompress.vm v16, v17, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vcompress.vm v16, v18, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vcompress.vm v16, v20, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
+; CHECK-NEXT:    vcompress.vm v16, v8, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
index a9d4dfcabcc9..e07add62505d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
-; RUN:   -mattr=+f -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -7,10 +7,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -27,10 +29,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -47,10 +51,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -67,10 +73,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -87,10 +95,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -107,10 +117,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -127,10 +139,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -147,10 +161,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -167,10 +183,12 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -187,10 +205,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -207,10 +229,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -227,10 +253,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -247,10 +278,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -267,10 +300,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -287,10 +322,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -307,10 +344,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -327,10 +366,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -347,10 +388,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -367,10 +410,12 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -387,10 +432,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -407,10 +456,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -427,10 +480,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -441,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfadd.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfadd.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -467,10 +711,13 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -487,10 +734,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -507,10 +757,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -527,10 +780,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -547,10 +803,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -567,10 +826,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -587,10 +849,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -607,10 +872,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -627,10 +895,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -647,10 +918,13 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -667,10 +941,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -687,10 +966,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -707,10 +989,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -727,10 +1012,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -747,10 +1035,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -767,10 +1058,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -787,10 +1081,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -807,10 +1104,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -827,10 +1127,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -847,10 +1150,13 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -867,10 +1173,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -880,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfadd.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfadd.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
index c7efd5a74a85..5b36ecd69bc1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
@@ -1,16 +1,19 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +21,21 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +45,19 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +65,21 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +89,19 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +109,21 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +133,19 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +153,21 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +177,19 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +197,23 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +223,21 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i32 %2)
@@ -218,19 +245,24 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
@@ -240,17 +272,19 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2)
@@ -258,19 +292,21 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -280,17 +316,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2)
@@ -298,19 +336,21 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -320,17 +360,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2)
@@ -338,19 +380,21 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -360,17 +404,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2)
@@ -378,19 +424,23 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -400,17 +450,21 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i32 %2)
@@ -418,19 +472,24 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
@@ -440,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfdiv.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -465,11 +710,14 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -485,11 +733,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -505,11 +756,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -525,11 +779,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -545,11 +802,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -565,11 +825,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -585,11 +848,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -605,11 +871,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -625,11 +894,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -645,11 +917,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -665,11 +940,16 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -685,11 +965,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -705,11 +988,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -725,11 +1011,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -745,11 +1034,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -765,11 +1057,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -785,11 +1080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -805,11 +1103,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -825,11 +1126,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -845,11 +1149,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -865,11 +1172,16 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -879,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfdiv.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
index c1f825444177..ccda55ffa9f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmacc.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmacc.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmacc.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmacc.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmacc.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
index 5111bb181c21..c6c848616469 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmadd.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmadd.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmadd.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
index 55a36db67232..7b0c0dc6eae4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -6,10 +7,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -26,10 +29,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -46,10 +51,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -66,10 +73,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -86,10 +95,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -106,10 +117,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -126,10 +139,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -146,10 +161,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -166,10 +183,12 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -186,10 +205,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -206,10 +229,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -226,10 +253,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -246,10 +278,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -266,10 +300,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -286,10 +322,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -306,10 +344,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -326,10 +366,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -346,10 +388,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -366,10 +410,12 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -386,10 +432,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -406,10 +456,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -426,10 +480,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfmax.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -440,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfmax.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfmax.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -466,10 +711,13 @@ declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -486,10 +734,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -506,10 +757,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -526,10 +780,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -546,10 +803,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -566,10 +826,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -586,10 +849,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -606,10 +872,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -626,10 +895,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -646,10 +918,13 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -666,10 +941,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -686,10 +966,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -706,10 +989,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -726,10 +1012,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -746,10 +1035,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -766,10 +1058,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -786,10 +1081,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -806,10 +1104,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -826,10 +1127,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -846,10 +1150,13 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -866,10 +1173,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfmax.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -879,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfmax.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfmax.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
index 3a264a8a41cb..fafa086c525f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -6,10 +7,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -26,10 +29,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -46,10 +51,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -66,10 +73,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -86,10 +95,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -106,10 +117,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -126,10 +139,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -146,10 +161,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -166,10 +183,12 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -186,10 +205,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -206,10 +229,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -226,10 +253,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -246,10 +278,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -266,10 +300,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -286,10 +322,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -306,10 +344,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -326,10 +366,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -346,10 +388,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -366,10 +410,12 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -386,10 +432,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -406,10 +456,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -426,10 +480,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfmin.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -440,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfmin.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfmin.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -466,10 +711,13 @@ declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -486,10 +734,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -506,10 +757,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -526,10 +780,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -546,10 +803,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -566,10 +826,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -586,10 +849,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -606,10 +872,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -626,10 +895,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -646,10 +918,13 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -666,10 +941,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -686,10 +966,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -706,10 +989,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -726,10 +1012,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -746,10 +1035,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -766,10 +1058,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -786,10 +1081,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -806,10 +1104,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -826,10 +1127,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -846,10 +1150,13 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -866,10 +1173,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfmin.vf {{v[0-9]+}}, {{v[0-9]+}}, {{(a|ft)[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -879,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfmin.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfmin.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
index fd22ce05dcba..ed3877f1efde 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsac.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsac.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
index 0bd2f2f6925a..0411d6163af7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsub.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsub.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
index a4d860054abb..2d6e80714e01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
@@ -1,16 +1,19 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +21,21 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +45,19 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +65,21 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +89,19 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +109,21 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +133,19 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +153,21 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +177,19 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +197,23 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +223,21 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i32 %2)
@@ -218,19 +245,24 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
@@ -240,17 +272,19 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2)
@@ -258,19 +292,21 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -280,17 +316,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2)
@@ -298,19 +336,21 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -320,17 +360,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2)
@@ -338,19 +380,21 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -360,17 +404,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2)
@@ -378,19 +424,23 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -400,17 +450,21 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i32 %2)
@@ -418,19 +472,24 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
@@ -440,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfmul.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfmul.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -465,11 +710,14 @@ declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -485,11 +733,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -505,11 +756,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -525,11 +779,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -545,11 +802,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -565,11 +825,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -585,11 +848,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -605,11 +871,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -625,11 +894,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -645,11 +917,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -665,11 +940,16 @@ declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -685,11 +965,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -705,11 +988,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -725,11 +1011,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -745,11 +1034,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -765,11 +1057,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -785,11 +1080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -805,11 +1103,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -825,11 +1126,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -845,11 +1149,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -865,11 +1172,16 @@ declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -879,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfmul.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfmul.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
index 4c122c14df6d..d4b5b22a80aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
index d1adf4ac4325..b808bfcf818a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
index 6a62afaaf7f7..b59813aacefb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
index b6a2a90aea75..34d89fe43b40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -423,6 +423,148 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -854,3 +996,171 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
index 1c5a31853a19..11e0efb0bad6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
@@ -1,15 +1,19 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -25,11 +29,14 @@ declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -45,11 +52,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -65,11 +75,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -85,11 +98,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -105,11 +121,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -125,11 +144,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -145,11 +167,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -165,11 +190,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -185,11 +213,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -205,11 +236,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -225,11 +259,16 @@ declare <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -245,11 +284,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -265,11 +307,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -285,11 +330,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -305,11 +353,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -325,11 +376,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -345,11 +399,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -365,11 +422,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -385,11 +445,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -405,11 +468,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -425,11 +491,16 @@ declare <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -439,3 +510,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfrdiv.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
index 6be0bf7e9206..bead3f39b259 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
@@ -7,10 +8,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
@@ -20,7 +23,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -28,11 +31,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -49,10 +54,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
@@ -62,7 +69,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -70,11 +77,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -91,10 +100,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -104,7 +115,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -112,11 +123,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -133,10 +146,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
@@ -146,7 +161,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -154,11 +169,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -175,10 +192,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
@@ -188,7 +207,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -196,11 +215,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -217,10 +238,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
@@ -230,7 +255,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -238,11 +263,15 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -259,10 +288,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
@@ -272,7 +303,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -280,11 +311,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -301,10 +334,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -314,7 +349,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -322,11 +357,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -343,10 +380,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
@@ -356,7 +395,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -364,11 +403,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -385,10 +426,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
@@ -398,7 +441,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -406,11 +449,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -427,10 +472,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
@@ -440,7 +489,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -448,11 +497,15 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -461,3 +514,191 @@ entry:
 
   ret <vscale x 2 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredmax.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
index 4445dfd8057e..692ff7ecd93f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
@@ -7,10 +8,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
@@ -20,7 +23,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -28,11 +31,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -49,10 +54,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
@@ -62,7 +69,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -70,11 +77,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -91,10 +100,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -104,7 +115,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -112,11 +123,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -133,10 +146,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
@@ -146,7 +161,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -154,11 +169,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -175,10 +192,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
@@ -188,7 +207,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -196,11 +215,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -217,10 +238,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
@@ -230,7 +255,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -238,11 +263,15 @@ declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -259,10 +288,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
@@ -272,7 +303,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -280,11 +311,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -301,10 +334,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -314,7 +349,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -322,11 +357,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -343,10 +380,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
@@ -356,7 +395,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -364,11 +403,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -385,10 +426,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
@@ -398,7 +441,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -406,11 +449,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -427,10 +472,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
@@ -440,7 +489,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -448,11 +497,15 @@ declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -461,3 +514,191 @@ entry:
 
   ret <vscale x 2 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredmin.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
index b3d86138d2c2..8c4e530ef7e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
@@ -7,10 +8,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
@@ -20,7 +23,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -28,11 +31,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -49,10 +54,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
@@ -62,7 +69,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -70,11 +77,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -91,10 +100,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -104,7 +115,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -112,11 +123,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -133,10 +146,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
@@ -146,7 +161,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -154,11 +169,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -175,10 +192,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
@@ -188,7 +207,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -196,11 +215,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -217,10 +238,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
@@ -230,7 +255,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -238,11 +263,15 @@ declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -259,10 +288,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
@@ -272,7 +303,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -280,11 +311,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -301,10 +334,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -314,7 +349,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -322,11 +357,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -343,10 +380,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
@@ -356,7 +395,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -364,11 +403,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -385,10 +426,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
@@ -398,7 +441,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -406,11 +449,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -427,10 +472,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
@@ -440,7 +489,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -448,11 +497,15 @@ declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -461,3 +514,191 @@ entry:
 
   ret <vscale x 2 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredosum.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
index 6747a6655612..69070dc6133c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
@@ -7,10 +8,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
@@ -20,7 +23,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -28,11 +31,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -49,10 +54,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
@@ -62,7 +69,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -70,11 +77,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -91,10 +100,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -104,7 +115,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -112,11 +123,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -133,10 +146,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
@@ -146,7 +161,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -154,11 +169,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -175,10 +192,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
@@ -188,7 +207,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -196,11 +215,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -217,10 +238,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
@@ -230,7 +255,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -238,11 +263,15 @@ declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -259,10 +288,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
@@ -272,7 +303,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -280,11 +311,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -301,10 +334,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -314,7 +349,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -322,11 +357,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -343,10 +380,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
@@ -356,7 +395,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -364,11 +403,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -385,10 +426,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
@@ -398,7 +441,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -406,11 +449,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -427,10 +472,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
@@ -440,7 +489,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -448,11 +497,15 @@ declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -461,3 +514,191 @@ entry:
 
   ret <vscale x 2 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v18, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v18, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v20, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v20, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v8, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfredsum.vs v16, v8, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
index a6196865464d..112139535872 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
-; RUN:   -mattr=+f -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   <vscale x 1 x half>,
@@ -7,10 +7,13 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -27,10 +30,13 @@ declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -47,10 +53,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -67,10 +76,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -87,10 +99,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -107,10 +122,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -127,10 +145,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -147,10 +168,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -167,10 +191,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -187,10 +214,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -207,10 +237,13 @@ declare <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -227,10 +260,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -247,10 +285,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -267,10 +308,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -287,10 +331,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -307,10 +354,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -327,10 +377,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -347,10 +400,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -367,10 +423,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -387,10 +446,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -407,10 +469,13 @@ declare <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -427,10 +492,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -440,3 +510,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfrsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfrsub.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
index bfc666d6254b..1786b8ddabe5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
@@ -1,16 +1,19 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +21,21 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +45,19 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +65,21 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +89,19 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +109,21 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +133,19 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +153,21 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +177,19 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +197,23 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +223,21 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i32 %2)
@@ -218,19 +245,24 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
@@ -240,17 +272,19 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2)
@@ -258,19 +292,21 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -280,17 +316,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2)
@@ -298,19 +336,21 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -320,17 +360,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2)
@@ -338,19 +380,21 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -360,17 +404,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2)
@@ -378,19 +424,23 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -400,17 +450,21 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i32 %2)
@@ -418,19 +472,24 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
@@ -440,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -465,11 +710,14 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -485,11 +733,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -505,11 +756,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -525,11 +779,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -545,11 +802,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -565,11 +825,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -585,11 +848,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -605,11 +871,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -625,11 +894,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -645,11 +917,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -665,11 +940,16 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -685,11 +965,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -705,11 +988,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -725,11 +1011,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -745,11 +1034,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -765,11 +1057,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -785,11 +1080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -805,11 +1103,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -825,11 +1126,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -845,11 +1149,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -865,11 +1172,16 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -879,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfsgnj.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
index 9831d2b98d65..98c98b071552 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
@@ -1,16 +1,19 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +21,21 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +45,19 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +65,21 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +89,19 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +109,21 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +133,19 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +153,21 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +177,19 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +197,23 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +223,21 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i32 %2)
@@ -218,19 +245,24 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
@@ -240,17 +272,19 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2)
@@ -258,19 +292,21 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -280,17 +316,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2)
@@ -298,19 +336,21 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -320,17 +360,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2)
@@ -338,19 +380,21 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -360,17 +404,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2)
@@ -378,19 +424,23 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -400,17 +450,21 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i32 %2)
@@ -418,19 +472,24 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
@@ -440,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfsgnjn.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -465,11 +710,14 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -485,11 +733,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -505,11 +756,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -525,11 +779,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -545,11 +802,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -565,11 +825,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -585,11 +848,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -605,11 +871,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -625,11 +894,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -645,11 +917,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -665,11 +940,16 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -685,11 +965,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -705,11 +988,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -725,11 +1011,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -745,11 +1034,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -765,11 +1057,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -785,11 +1080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -805,11 +1103,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -825,11 +1126,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -845,11 +1149,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -865,11 +1172,16 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -879,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfsgnjn.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
index 2d7b198ce63a..26f32ba7b783 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
@@ -1,16 +1,19 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +21,21 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
+declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +45,19 @@ entry:
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +65,21 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
+declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +89,19 @@ entry:
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +109,21 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
+declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +133,19 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +153,21 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
+declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +177,19 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +197,23 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
+declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +223,21 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     i32 %2)
@@ -218,19 +245,24 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
+declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
@@ -240,17 +272,19 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i32 %2)
@@ -258,19 +292,21 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
+declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -280,17 +316,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i32 %2)
@@ -298,19 +336,21 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -320,17 +360,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i32 %2)
@@ -338,19 +380,21 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
+declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -360,17 +404,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i32 %2)
@@ -378,19 +424,23 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
+declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -400,17 +450,21 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     i32 %2)
@@ -418,19 +472,24 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
+declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
@@ -440,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfsgnjx.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -465,11 +710,14 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -485,11 +733,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -505,11 +756,14 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -525,11 +779,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -545,11 +802,14 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -565,11 +825,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -585,11 +848,14 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -605,11 +871,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -625,11 +894,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -645,11 +917,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -665,11 +940,16 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -685,11 +965,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -705,11 +988,14 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -725,11 +1011,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -745,11 +1034,14 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -765,11 +1057,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -785,11 +1080,14 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -805,11 +1103,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -825,11 +1126,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -845,11 +1149,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -865,11 +1172,16 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -879,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfsgnjx.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
index ffe994199c86..dec98437d55a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   <vscale x 1 x half>,
@@ -510,3 +510,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
index 7147ebb6f85a..b4163d6a21eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
@@ -521,3 +521,225 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfslide1up.vf v26, v16, ft0
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfslide1up.vf v28, v16, ft0
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
index 48c6dc9999ac..683b82e321ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
@@ -1,18 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 1 x half> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -27,21 +25,17 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 1 x half> %0,
-  <vscale x 1 x half> %1,
-  <vscale x 1 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
-    <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %0,
     i32 %3)
 
   ret <vscale x 1 x half> %a
@@ -51,14 +45,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(
+define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 2 x half> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -73,21 +65,17 @@ declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(
+define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 2 x half> %0,
-  <vscale x 2 x half> %1,
-  <vscale x 2 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
-    <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %0,
     i32 %3)
 
   ret <vscale x 2 x half> %a
@@ -97,14 +85,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(
+define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 4 x half> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -119,21 +105,17 @@ declare <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(
+define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 4 x half> %0,
-  <vscale x 4 x half> %1,
-  <vscale x 4 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
-    <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %0,
     i32 %3)
 
   ret <vscale x 4 x half> %a
@@ -143,14 +125,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(
+define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 8 x half> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,21 +145,17 @@ declare <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(
+define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 8 x half> %0,
-  <vscale x 8 x half> %1,
-  <vscale x 8 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
-    <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %0,
     i32 %3)
 
   ret <vscale x 8 x half> %a
@@ -189,14 +165,12 @@ declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(
+define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 16 x half> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -211,21 +185,17 @@ declare <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(
+define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 16 x half> %0,
-  <vscale x 16 x half> %1,
-  <vscale x 16 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
-    <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %0,
     i32 %3)
 
   ret <vscale x 16 x half> %a
@@ -235,14 +205,12 @@ declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
   <vscale x 32 x half>,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(
+define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 32 x half> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
     <vscale x 32 x half> %0,
@@ -251,46 +219,16 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16(
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
-; CHECK-NEXT:    vfsqrt.v v16, v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 32 x half> %0,
-  <vscale x 32 x half> %1,
-  <vscale x 32 x i1> %2,
-  i32 %3) nounwind {
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x half> %a
-}
-
 declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
   <vscale x 1 x float>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(
+define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 1 x float> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -305,21 +243,17 @@ declare <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(
+define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 1 x float> %0,
-  <vscale x 1 x float> %1,
-  <vscale x 1 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
-    <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %0,
     i32 %3)
 
   ret <vscale x 1 x float> %a
@@ -329,14 +263,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(
+define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 2 x float> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -351,21 +283,17 @@ declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(
+define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 2 x float> %0,
-  <vscale x 2 x float> %1,
-  <vscale x 2 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
-    <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %0,
     i32 %3)
 
   ret <vscale x 2 x float> %a
@@ -375,14 +303,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
   <vscale x 4 x float>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(
+define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 4 x float> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -397,21 +323,17 @@ declare <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(
+define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 4 x float> %0,
-  <vscale x 4 x float> %1,
-  <vscale x 4 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
-    <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %0,
     i32 %3)
 
   ret <vscale x 4 x float> %a
@@ -421,14 +343,12 @@ declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
   <vscale x 8 x float>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(
+define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 8 x float> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -443,21 +363,17 @@ declare <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(
+define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 8 x float> %0,
-  <vscale x 8 x float> %1,
-  <vscale x 8 x i1> %2,
-  i32 %3) nounwind {
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
-    <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %0,
     i32 %3)
 
   ret <vscale x 8 x float> %a
@@ -467,14 +383,12 @@ declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
   <vscale x 16 x float>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(
+define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 16 x float> %0,
-  i32 %1) nounwind {
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
     <vscale x 16 x float> %0,
@@ -483,30 +397,140 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
+declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
+  <vscale x 1 x double>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(
-; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32:
+define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
-; CHECK-NEXT:    vfsqrt.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    jalr zero, 0(ra)
-  <vscale x 16 x float> %0,
-  <vscale x 16 x float> %1,
-  <vscale x 16 x i1> %2,
-  i32 %3) nounwind {
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
+    <vscale x 1 x double> %0,
+    i32 %1)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %0,
     i32 %3)
 
-  ret <vscale x 16 x float> %a
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
+    <vscale x 2 x double> %0,
+    i32 %1)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %0,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
+    <vscale x 4 x double> %0,
+    i32 %1)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %0,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
+    <vscale x 8 x double> %0,
+    i32 %1)
+
+  ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
index 14ccd4ce867a..715425c69579 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
-; RUN:   -mattr=+f -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
@@ -7,10 +7,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -27,10 +29,12 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -47,10 +51,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -67,10 +73,12 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -87,10 +95,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -107,10 +117,12 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -127,10 +139,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -147,10 +161,12 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -167,10 +183,12 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -187,10 +205,14 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -207,10 +229,14 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -227,10 +253,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -247,10 +278,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -267,10 +300,12 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -287,10 +322,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -307,10 +344,12 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -327,10 +366,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -347,10 +388,12 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -367,10 +410,12 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -387,10 +432,14 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -407,10 +456,14 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -427,10 +480,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -441,16 +499,202 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vfsub.vv v16, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v24, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vfsub.vv v16, v24, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
@@ -467,10 +711,13 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -487,10 +734,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
@@ -507,10 +757,13 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -527,10 +780,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
@@ -547,10 +803,13 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -567,10 +826,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
@@ -587,10 +849,13 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -607,10 +872,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
@@ -627,10 +895,13 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -647,10 +918,13 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
     half %1,
@@ -667,10 +941,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
   i32);
 
 define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    fmv.h.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e16,m8,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
@@ -687,10 +966,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
@@ -707,10 +989,13 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -727,10 +1012,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
@@ -747,10 +1035,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -767,10 +1058,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
@@ -787,10 +1081,13 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -807,10 +1104,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
@@ -827,10 +1127,13 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -847,10 +1150,13 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
     float %1,
@@ -867,10 +1173,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetvli a0, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK:       vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
@@ -880,3 +1191,221 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v17, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v18, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v20, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
+  <vscale x 8 x double>,
+  double,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vfsub.vf v16, v16, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  double,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a1, 8(sp)
+; CHECK-NEXT:    sw a2, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a3, e64,m8,tu,mu
+; CHECK-NEXT:    vfsub.vf v16, v8, ft0, v0.t
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    double %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
index ec934b3bf12d..cd660085e5e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
@@ -237,6 +237,196 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmacc.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
@@ -480,3 +670,199 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmacc.vf v16, ft0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
index decd4c73f2f4..a01ce063caa4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
@@ -237,6 +237,196 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmsac.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
@@ -480,3 +670,199 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwmsac.vf v16, ft0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
index 530f41b8d05a..5a026ceda3d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
@@ -1,5 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
@@ -8,12 +7,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   i32);
 
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
@@ -31,12 +28,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
   i32);
 
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
@@ -54,12 +49,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
   i32);
 
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
@@ -77,12 +70,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
   i32);
 
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v17, v18, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
@@ -100,12 +91,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
   i32);
 
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
@@ -123,12 +112,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
   i32);
 
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v18, v19, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
@@ -146,12 +133,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
   i32);
 
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
@@ -169,12 +154,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
   i32);
 
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v20, v22, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
@@ -192,15 +175,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
   i32);
 
 define <vscale x 16 x float>  @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
-; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
@@ -218,15 +196,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
   i32);
 
 define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
-; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vv v16, v8, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
@@ -237,6 +210,174 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwnmacc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
@@ -244,13 +385,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
   i32);
 
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
     half %1,
@@ -268,13 +406,10 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
     half %1,
@@ -292,13 +427,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
   i32);
 
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
     half %1,
@@ -316,13 +448,10 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v17, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
     half %1,
@@ -340,13 +469,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
   i32);
 
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
     half %1,
@@ -364,13 +490,10 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v18, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
     half %1,
@@ -388,13 +511,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
   i32);
 
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
     half %1,
@@ -412,13 +532,10 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
     half %1,
@@ -436,15 +553,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
   i32);
 
 define <vscale x 16 x float>  @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
-; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
     half %1,
@@ -462,15 +574,10 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
   i32);
 
 define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
-; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    fmv.h.x ft0, a0
-; CHECK-NEXT:    vsetvli a0, a2, e16,m4,tu,mu
-; CHECK-NEXT:    vfwnmacc.vf v16, ft0, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
     half %1,
@@ -480,3 +587,171 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwnmacc.vf {{v[0-9]+}}, ft0, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
index 784f112da5b2..462a275cf5dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
@@ -237,6 +237,196 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v18, v19, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v20, v22, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwnmsac.vv v16, v8, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
@@ -480,3 +670,199 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double>  @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double>  @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double>  @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double>  @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vfwnmsac.vf v16, ft0, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
index 3e2fb4ca7653..c366e10ffdf6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
@@ -7,10 +8,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -28,10 +31,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -43,10 +48,12 @@ entry:
 }
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -57,10 +64,12 @@ entry:
 }
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -78,10 +87,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
   i32);
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -99,10 +110,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
   i32);
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -114,10 +127,12 @@ entry:
 }
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -128,10 +143,12 @@ entry:
 }
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -149,10 +166,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
   i32);
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -170,10 +189,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
   i32);
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -185,10 +206,12 @@ entry:
 }
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -199,10 +222,12 @@ entry:
 }
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -220,10 +245,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
   i32);
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -241,10 +268,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
   i32);
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -256,10 +285,12 @@ entry:
 }
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -270,10 +301,12 @@ entry:
 }
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -291,10 +324,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
   i32);
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -312,10 +347,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
   i32);
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -327,10 +364,12 @@ entry:
 }
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -341,10 +380,12 @@ entry:
 }
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -362,10 +403,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
   i32);
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -383,10 +426,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
   i32);
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -398,10 +443,12 @@ entry:
 }
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -412,10 +459,12 @@ entry:
 }
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -433,10 +482,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
   i32);
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -454,10 +505,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
   i32);
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -469,10 +522,12 @@ entry:
 }
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -483,10 +538,12 @@ entry:
 }
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -504,10 +561,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
   i32);
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -525,10 +584,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
   i32);
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -540,10 +601,12 @@ entry:
 }
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -554,10 +617,12 @@ entry:
 }
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -575,10 +640,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
   i32);
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -596,10 +663,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
   i32);
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -611,10 +680,12 @@ entry:
 }
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -625,10 +696,12 @@ entry:
 }
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -646,10 +719,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
   i32);
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -667,10 +742,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
   i32);
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -682,10 +759,12 @@ entry:
 }
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -696,10 +775,12 @@ entry:
 }
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -717,10 +798,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
   i32);
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -738,10 +821,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
   i32);
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -753,10 +838,12 @@ entry:
 }
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -767,10 +854,12 @@ entry:
 }
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -788,10 +877,12 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
   i32);
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -809,10 +900,12 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
   i32);
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -824,10 +917,12 @@ entry:
 }
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -838,10 +933,12 @@ entry:
 }
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -859,10 +956,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
   i32);
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -880,10 +979,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
   i32);
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -895,10 +996,12 @@ entry:
 }
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -909,10 +1012,12 @@ entry:
 }
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -930,10 +1035,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
   i32);
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -951,10 +1058,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
   i32);
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -966,10 +1075,12 @@ entry:
 }
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -980,10 +1091,12 @@ entry:
 }
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -1001,10 +1114,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
   i32);
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1022,10 +1137,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
   i32);
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1037,10 +1154,12 @@ entry:
 }
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1051,10 +1170,12 @@ entry:
 }
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1072,10 +1193,12 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1093,10 +1216,12 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1108,10 +1233,12 @@ entry:
 }
 
 define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1122,10 +1249,12 @@ entry:
 }
 
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1143,10 +1272,12 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1164,10 +1295,12 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1179,10 +1312,12 @@ entry:
 }
 
 define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1193,10 +1328,12 @@ entry:
 }
 
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1214,10 +1351,12 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1235,10 +1374,12 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1250,10 +1391,12 @@ entry:
 }
 
 define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1264,10 +1407,12 @@ entry:
 }
 
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1285,10 +1430,12 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1306,10 +1453,12 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1321,10 +1470,12 @@ entry:
 }
 
 define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1335,10 +1486,12 @@ entry:
 }
 
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1356,10 +1509,12 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1377,10 +1532,12 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1392,10 +1549,12 @@ entry:
 }
 
 define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1406,10 +1565,12 @@ entry:
 }
 
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1427,10 +1588,12 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1448,10 +1611,12 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1463,10 +1628,12 @@ entry:
 }
 
 define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1477,10 +1644,12 @@ entry:
 }
 
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1498,10 +1667,12 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1519,10 +1690,12 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1534,10 +1707,12 @@ entry:
 }
 
 define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1548,10 +1723,12 @@ entry:
 }
 
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1569,10 +1746,12 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1590,10 +1769,12 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1605,10 +1786,12 @@ entry:
 }
 
 define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1619,10 +1802,12 @@ entry:
 }
 
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1640,10 +1825,12 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1661,10 +1848,12 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1676,10 +1865,12 @@ entry:
 }
 
 define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1690,10 +1881,12 @@ entry:
 }
 
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1703,3 +1896,240 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vslidedown.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
index 3a5c47b9112e..b21236fc7ba4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
@@ -7,10 +8,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -28,10 +31,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
   i32);
 
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -43,10 +48,12 @@ entry:
 }
 
 define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -57,10 +64,12 @@ entry:
 }
 
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
@@ -78,10 +87,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
   i32);
 
 define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -99,10 +110,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
   i32);
 
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -114,10 +127,12 @@ entry:
 }
 
 define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -128,10 +143,12 @@ entry:
 }
 
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
@@ -149,10 +166,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
   i32);
 
 define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -170,10 +189,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
   i32);
 
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -185,10 +206,12 @@ entry:
 }
 
 define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -199,10 +222,12 @@ entry:
 }
 
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
@@ -220,10 +245,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
   i32);
 
 define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -241,10 +268,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
   i32);
 
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -256,10 +285,12 @@ entry:
 }
 
 define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -270,10 +301,12 @@ entry:
 }
 
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
@@ -291,10 +324,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
   i32);
 
 define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -312,10 +347,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
   i32);
 
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -327,10 +364,12 @@ entry:
 }
 
 define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -341,10 +380,12 @@ entry:
 }
 
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
@@ -362,10 +403,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
   i32);
 
 define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -383,10 +426,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
   i32);
 
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -398,10 +443,12 @@ entry:
 }
 
 define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -412,10 +459,12 @@ entry:
 }
 
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
@@ -433,10 +482,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
   i32);
 
 define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -454,10 +505,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
   i32);
 
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -469,10 +522,12 @@ entry:
 }
 
 define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -483,10 +538,12 @@ entry:
 }
 
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
@@ -504,10 +561,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
   i32);
 
 define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -525,10 +584,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
   i32);
 
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -540,10 +601,12 @@ entry:
 }
 
 define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -554,10 +617,12 @@ entry:
 }
 
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
@@ -575,10 +640,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
   i32);
 
 define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -596,10 +663,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
   i32);
 
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -611,10 +680,12 @@ entry:
 }
 
 define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -625,10 +696,12 @@ entry:
 }
 
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
@@ -646,10 +719,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
   i32);
 
 define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -667,10 +742,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
   i32);
 
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -682,10 +759,12 @@ entry:
 }
 
 define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -696,10 +775,12 @@ entry:
 }
 
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
@@ -717,10 +798,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
   i32);
 
 define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -738,10 +821,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
   i32);
 
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -753,10 +838,12 @@ entry:
 }
 
 define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -767,10 +854,12 @@ entry:
 }
 
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
@@ -788,10 +877,12 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
   i32);
 
 define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -809,10 +900,12 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
   i32);
 
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -824,10 +917,12 @@ entry:
 }
 
 define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -838,10 +933,12 @@ entry:
 }
 
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
@@ -859,10 +956,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
   i32);
 
 define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -880,10 +979,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
   i32);
 
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -895,10 +996,12 @@ entry:
 }
 
 define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -909,10 +1012,12 @@ entry:
 }
 
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
@@ -930,10 +1035,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
   i32);
 
 define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -951,10 +1058,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
   i32);
 
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -966,10 +1075,12 @@ entry:
 }
 
 define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -980,10 +1091,12 @@ entry:
 }
 
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
@@ -1001,10 +1114,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
   i32);
 
 define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1022,10 +1137,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
   i32);
 
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1037,10 +1154,12 @@ entry:
 }
 
 define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1051,10 +1170,12 @@ entry:
 }
 
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
@@ -1072,10 +1193,12 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1093,10 +1216,12 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
   i32);
 
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1108,10 +1233,12 @@ entry:
 }
 
 define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1122,10 +1249,12 @@ entry:
 }
 
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
@@ -1143,10 +1272,12 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1164,10 +1295,12 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
   i32);
 
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1179,10 +1312,12 @@ entry:
 }
 
 define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1193,10 +1328,12 @@ entry:
 }
 
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
@@ -1214,10 +1351,12 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1235,10 +1374,12 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
   i32);
 
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1250,10 +1391,12 @@ entry:
 }
 
 define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1264,10 +1407,12 @@ entry:
 }
 
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
@@ -1285,10 +1430,12 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1306,10 +1453,12 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
   i32);
 
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1321,10 +1470,12 @@ entry:
 }
 
 define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1335,10 +1486,12 @@ entry:
 }
 
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
@@ -1356,10 +1509,12 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1377,10 +1532,12 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
   i32);
 
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1392,10 +1549,12 @@ entry:
 }
 
 define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1406,10 +1565,12 @@ entry:
 }
 
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
@@ -1427,10 +1588,12 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1448,10 +1611,12 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
   i32);
 
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1463,10 +1628,12 @@ entry:
 }
 
 define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1477,10 +1644,12 @@ entry:
 }
 
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
@@ -1498,10 +1667,12 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1519,10 +1690,12 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
   i32);
 
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1534,10 +1707,12 @@ entry:
 }
 
 define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1548,10 +1723,12 @@ entry:
 }
 
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
@@ -1569,10 +1746,12 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1590,10 +1769,12 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
   i32);
 
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1605,10 +1786,12 @@ entry:
 }
 
 define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1619,10 +1802,12 @@ entry:
 }
 
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
@@ -1640,10 +1825,12 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1661,10 +1848,12 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
   i32);
 
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1676,10 +1865,12 @@ entry:
 }
 
 define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1690,10 +1881,12 @@ entry:
 }
 
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32
-; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
@@ -1703,3 +1896,240 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v17, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v17, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v18, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v18, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vslideup.vx v16, v20, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vslideup.vi v16, v20, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}


        

