[llvm] 438b6dd - [RISCV] Add missing nxvXf64 intrinsics test cases for floating-point compare for RV32.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 1 21:29:34 PDT 2021


Author: Craig Topper
Date: 2021-04-01T20:57:13-07:00
New Revision: 438b6dd3e53a9f28ac662d1dd067188cfe5767e9

URL: https://github.com/llvm/llvm-project/commit/438b6dd3e53a9f28ac662d1dd067188cfe5767e9
DIFF: https://github.com/llvm/llvm-project/commit/438b6dd3e53a9f28ac662d1dd067188cfe5767e9.diff

LOG: [RISCV] Add missing nxvXf64 intrinsics test cases for floating-point compare for RV32.
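
As the NOTE header in each modified file indicates, the CHECK lines are autogenerated by
utils/update_llc_test_checks.py. A sketch of a typical regeneration command after adding the
new nxvXf64 declarations and defines (the build/bin/llc path is only an example; the script
also picks up llc from PATH if --llc-binary is omitted):

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll

Note that the RUN lines also switch from +f to +d, since the double-precision vector compares
added here require the D extension.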

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
index 34cceec84bc0..f588b5743ae9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
   <vscale x 1 x half>,
@@ -469,6 +469,162 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v8, v9
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x double> %3,
+    <vscale x 1 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v8, v10
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v8, v10
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x double> %3,
+    <vscale x 2 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v8, v12
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v8, v12
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x double> %3,
+    <vscale x 4 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
 declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -909,3 +1065,174 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
index 0d3ecdb2f6e7..df03119ffb43 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
   <vscale x 1 x half>,
@@ -441,3 +441,174 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
index 7da9fb02c56b..f4a65eab8806 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
   <vscale x 1 x half>,
@@ -441,3 +441,174 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
index 7984dadaa79f..23f1ff618eb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
   <vscale x 1 x half>,
@@ -469,6 +469,162 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v8, v9
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x double> %3,
+    <vscale x 1 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v8, v10
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v8, v10
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x double> %3,
+    <vscale x 2 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v8, v12
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v8, v12
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x double> %3,
+    <vscale x 4 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
 declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -909,3 +1065,174 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
index 87a95ac73f7f..4bed8e42a368 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
   <vscale x 1 x half>,
@@ -469,6 +469,162 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x double> %3,
+    <vscale x 1 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v8, v10
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v8, v10
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x double> %3,
+    <vscale x 2 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v8, v12
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v8, v12
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x double> %3,
+    <vscale x 4 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
 declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -909,3 +1065,174 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
index 1ec190dfa0b8..a849c0157d61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
   <vscale x 1 x half>,
@@ -469,6 +469,162 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x double> %3,
+    <vscale x 1 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v8, v10
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v8, v10
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i32 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x double> %3,
+    <vscale x 2 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v8, v12
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v8, v12
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i32 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x double> %3,
+    <vscale x 4 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
 declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -909,3 +1065,174 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x double> %1,
+    double %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x double> %1,
+    double %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v8, ft0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw a0, 8(sp)
+; CHECK-NEXT:    sw a1, 12(sp)
+; CHECK-NEXT:    fld ft0, 8(sp)
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x double> %1,
+    double %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}


        

