[llvm-branch-commits] [llvm] 37b41bd - [RISCV] Add scalable vector fcmp ISel patterns

Fraser Cormack via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Mon Jan 11 11:49:14 PST 2021


Author: Fraser Cormack
Date: 2021-01-11T19:38:56Z
New Revision: 37b41bd0879e8ed1a07a6fc401a2b56dcd6f124c

URL: https://github.com/llvm/llvm-project/commit/37b41bd0879e8ed1a07a6fc401a2b56dcd6f124c
DIFF: https://github.com/llvm/llvm-project/commit/37b41bd0879e8ed1a07a6fc401a2b56dcd6f124c.diff

LOG: [RISCV] Add scalable vector fcmp ISel patterns

Original patch by @rogfer01.

All ordered comparisons except ONE are supported natively; UNE is the only
unordered comparison with native support. The remaining unordered
comparisons are expanded into sequences involving explicit NaN checks and
mask arithmetic.
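
For example (taken from the RV32 tests added below, with the vsetvli
toggles omitted), a ONE comparison becomes an inequality check guarded by
NaN checks on both operands:

  %vc = fcmp one <vscale x 8 x half> %va, %vb
  ; vmfne.vv v25, v16, v18   # va != vb, also true if either input is NaN
  ; vmfeq.vv v26, v18, v18   # vb is ordered (not NaN)
  ; vmfeq.vv v27, v16, v16   # va is ordered (not NaN)
  ; vmand.mm v26, v27, v26   # both operands ordered
  ; vmand.mm v0, v25, v26    # ordered and not equal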

Additionally, we expand GT, OGT, GE, and OGE to their swapped-operand
versions (LT, OLT, LE, OLE) and pattern-match those back to the "original"
instructions, swapping operands once more. This way we catch both
comparisons of each pair and both "vf" and "fv" forms with fewer patterns.
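
As an illustration, an OGT comparison selects the "LT" instruction with
its operands reversed, and an "fv"-form OLT against a splat comes out as
vmfgt.vf (register choices as in the RV32 tests below):

  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
  ; vmflt.vv v0, v18, v16    # operands swapped
  %vd = fcmp olt <vscale x 8 x half> %splat, %va
  ; vmfgt.vf v0, v16, fa0    # "fv" form matched via the swapped CC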

Also add support for floating-point splat_vector, with an optimization for
splatting fpimm0.
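
A minimal sketch of the splat lowering (register assignment assumed): a
splat of a scalar FP value uses vfmv.v.f, while splatting +0.0 (fpimm0)
is matched to the integer move-immediate, avoiding an FP register read:

  %head  = insertelement <vscale x 8 x half> undef, half 0.0, i32 0
  %splat = shufflevector <vscale x 8 x half> %head,
                         <vscale x 8 x half> undef,
                         <vscale x 8 x i32> zeroinitializer
  ; splatting a register value:  vfmv.v.f v16, fa0
  ; splatting 0.0 (fpimm0):      vmv.v.i  v16, 0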

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Fraser Cormack <fraser at codeplay.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D94242

Added: 
    llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2349b43f30c6b..03db9911c8671 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -374,6 +374,48 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     // We must custom-lower SPLAT_VECTOR vXi64 on RV32
     if (!Subtarget.is64Bit())
       setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
+
+    // Expand various CCs to best match the RVV ISA, which natively supports UNE
+    // but no other unordered comparisons, and supports all ordered comparisons
+    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
+    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
+    // and we pattern-match those back to the "original", swapping operands once
+    // more. This way we catch both operations and both "vf" and "fv" forms with
+    // fewer patterns.
+    ISD::CondCode VFPCCToExpand[] = {
+        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
+        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
+        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
+    };
+
+    if (Subtarget.hasStdExtZfh()) {
+      for (auto VT : {RISCVVMVTs::vfloat16mf4_t, RISCVVMVTs::vfloat16mf2_t,
+                      RISCVVMVTs::vfloat16m1_t, RISCVVMVTs::vfloat16m2_t,
+                      RISCVVMVTs::vfloat16m4_t, RISCVVMVTs::vfloat16m8_t}) {
+        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+        for (auto CC : VFPCCToExpand)
+          setCondCodeAction(CC, VT, Expand);
+      }
+    }
+
+    if (Subtarget.hasStdExtF()) {
+      for (auto VT : {RISCVVMVTs::vfloat32mf2_t, RISCVVMVTs::vfloat32m1_t,
+                      RISCVVMVTs::vfloat32m2_t, RISCVVMVTs::vfloat32m4_t,
+                      RISCVVMVTs::vfloat32m8_t}) {
+        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+        for (auto CC : VFPCCToExpand)
+          setCondCodeAction(CC, VT, Expand);
+      }
+    }
+
+    if (Subtarget.hasStdExtD()) {
+      for (auto VT : {RISCVVMVTs::vfloat64m1_t, RISCVVMVTs::vfloat64m2_t,
+                      RISCVVMVTs::vfloat64m4_t, RISCVVMVTs::vfloat64m8_t}) {
+        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+        for (auto CC : VFPCCToExpand)
+          setCondCodeAction(CC, VT, Expand);
+      }
+    }
   }
 
   // Function alignments.

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 0ef798937a66f..404c3050e6012 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -192,6 +192,45 @@ multiclass VPatIntegerSetCCSDNode_VX_VI<CondCode cc,
                                    SplatPat_simm5, simm5, swap>;
 }
 
+multiclass VPatFPSetCCSDNode_VV<CondCode cc, string instruction_name> {
+  foreach fvti = AllFloatVectors in
+    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Vector fvti.RegClass:$rs2),
+                                cc)),
+              (!cast<Instruction>(instruction_name#"_VV_"#fvti.LMul.MX)
+                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, VLMax, fvti.SEW)>;
+}
+
+multiclass VPatFPSetCCSDNode_VF<CondCode cc, string instruction_name> {
+  foreach fvti = AllFloatVectors in
+    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
+                                (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
+                                cc)),
+              (!cast<Instruction>(instruction_name#"_VF_"#fvti.LMul.MX)
+                  fvti.RegClass:$rs1,
+                  ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+                  VLMax, fvti.SEW)>;
+}
+
+multiclass VPatFPSetCCSDNode_FV<CondCode cc, string swapped_op_instruction_name> {
+  foreach fvti = AllFloatVectors in
+    def : Pat<(fvti.Mask (setcc (fvti.Vector (splat_vector fvti.ScalarRegClass:$rs2)),
+                                (fvti.Vector fvti.RegClass:$rs1),
+                                cc)),
+              (!cast<Instruction>(swapped_op_instruction_name#"_VF_"#fvti.LMul.MX)
+                  fvti.RegClass:$rs1,
+                  ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+                  VLMax, fvti.SEW)>;
+}
+
+multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
+                                      string inst_name,
+                                      string swapped_op_inst_name> {
+  defm : VPatFPSetCCSDNode_VV<cc, inst_name>;
+  defm : VPatFPSetCCSDNode_VF<cc, inst_name>;
+  defm : VPatFPSetCCSDNode_FV<cc, swapped_op_inst_name>;
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -299,6 +338,23 @@ foreach mti = AllMasks in {
 
 } // Predicates = [HasStdExtV]
 
+let Predicates = [HasStdExtV, HasStdExtF] in {
+
+// 14.11. Vector Floating-Point Compare Instructions
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETEQ,  "PseudoVMFEQ", "PseudoVMFEQ">;
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
+
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETNE,  "PseudoVMFNE", "PseudoVMFNE">;
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;
+
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETLT,  "PseudoVMFLT", "PseudoVMFGT">;
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;
+
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETLE,  "PseudoVMFLE", "PseudoVMFGE">;
+defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
+
+} // Predicates = [HasStdExtV, HasStdExtF]
+
 //===----------------------------------------------------------------------===//
 // Vector Splats
 //===----------------------------------------------------------------------===//
@@ -333,3 +389,16 @@ foreach vti = AllIntegerVectors in {
   }
 }
 } // Predicates = [HasStdExtV, IsRV32]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+foreach fvti = AllFloatVectors in {
+  def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
+            (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX)
+              ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs1">.ret,
+              VLMax, fvti.SEW)>;
+
+  def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))),
+            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
+              0, VLMax, fvti.SEW)>;
+}
+} // Predicates = [HasStdExtV, HasStdExtF]

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
new file mode 100644
index 0000000000000..3fefbb727413b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
@@ -0,0 +1,2748 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('fv' tests) should swap operands and
+; condition codes accordingly in order to generate a 'vf' instruction.
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v16, v18
+; CHECK-NEXT:    vmfeq.vv v26, v18, v18
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v28, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v28
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v28, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v28, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v18, v18
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v18, v18
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v16, v18
+; CHECK-NEXT:    vmfne.vv v26, v18, v18
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v28, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v28
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v28, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v28, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v16, v18
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v16, v18
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v18, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v18, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v18, v18
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v18, v18
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
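+; ONE has no single native RVV compare; it is expanded into an explicit NaN
+; check: (va != vb) AND (va == va) AND (vb == vb), where vmfeq.vv x, x is
+; false exactly when an element of x is NaN.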
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v16, v20
+; CHECK-NEXT:    vmfeq.vv v26, v20, v20
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
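+; ORD ("both operands ordered") is the AND of two self-equality masks:
+; vmfeq.vv x, x is true for an element iff that element is not NaN.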
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v20, v20
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v20, v20
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
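+; UEQ ("equal or unordered") is expanded as OEQ ORed with a per-operand NaN
+; test; vmfne.vv x, x is true for an element iff that element is NaN.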
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v16, v20
+; CHECK-NEXT:    vmfne.vv v26, v20, v20
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v28, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v28, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
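+; UGT, UGE, ULT and ULE are each expanded as the complement of the inverse
+; ordered comparison (e.g. UGT == NOT OLE); the mask complement is
+; materialized with vmset.m followed by vmxor.mm.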
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v16, v20
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v16, v20
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v20, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v20, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
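+; UNE needs no expansion: vmfne is the complement of vmfeq and is therefore
+; already true when either operand is NaN.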
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
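+; UNO ("either operand is NaN") is the OR of two self-inequality masks,
+; the dual of the ORD expansion above.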
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v20, v20
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v20, v20
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
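+; nxv8f64 tests: the first argument occupies a full LMUL=8 register group
+; (v16-v23), so the second vector operand is passed indirectly and reloaded
+; below with vle64.v from the pointer in a0.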
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v8
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vmfeq.vv v27, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v25, v16, v16
+; CHECK-NEXT:    vmfeq.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v25, v16, v16
+; CHECK-NEXT:    vmfeq.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v25, v16, v8
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vmfne.vv v27, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v8, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v8, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v25, v16, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v25, v16, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v25, v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v25, v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v16
+; CHECK-NEXT:    vmfne.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v16
+; CHECK-NEXT:    vmfne.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
new file mode 100644
index 0000000000000..0129169fa93de
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
@@ -0,0 +1,2748 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('fv' tests) should swap operands, and
+; condition codes accordingly, to generate a 'vf' instruction.
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v16, v18
+; CHECK-NEXT:    vmfeq.vv v26, v18, v18
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v28, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v28
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v28, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v28, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v18, v18
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v18, v18
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v16, v18
+; CHECK-NEXT:    vmfne.vv v26, v18, v18
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v28, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v28
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v28, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v28, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v16, v18
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v16, v18
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v18, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v18, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v18, v18
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v18, v18
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x half> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f16_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vmfne.vf v25, v26, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x half> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
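+; ONE has no native RVV encoding and is expanded to
+;   (a une b) & (a oeq a) & (b oeq b)
+; i.e. a vmfne plus two vmfeq NaN checks combined with vmand.mm. The splat
+; forms materialize the scalar with vfmv.v.f only for its NaN check. Under
+; the no-NaNs attribute the whole sequence folds to a single vmfne.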
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v16, v20
+; CHECK-NEXT:    vmfeq.vv v26, v20, v20
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
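+; ORD is expanded to (a oeq a) & (b oeq b): two vmfeq self-comparisons
+; combined with vmand.mm under an e8,m1 vsetvli for the mask arithmetic.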
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v20, v20
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v20, v20
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
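+; UEQ is expanded to (a oeq b) | (a une a) | (b une b): a vmfeq plus two
+; vmfne NaN checks combined with vmor.mm; with no NaNs it folds to vmfeq.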
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v25, v16, v20
+; CHECK-NEXT:    vmfne.vv v26, v20, v20
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v28, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v28, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
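+; UGT/UGE/ULT/ULE are expanded as the complement of the opposite ordered
+; comparison, e.g. (a ugt b) == !(a ole b); the inversion is done by XORing
+; against an all-ones mask from vmset.m. Attribute #0 on the _nonans tests
+; (presumably "no-nans-fp-math"="true", declared later in the file) lets
+; them fold to the plain ordered compare.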
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v16, v20
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v16, v20
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v25, v20, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vv v25, v20, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
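+; UNE is the one unordered comparison RVV supports natively, so it lowers to
+; a single vmfne in both the default and the no-NaNs tests.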
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
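+; UNO is expanded to (a une a) | (b une b): two vmfne self-comparisons
+; combined with vmor.mm.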
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v20, v20
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmfne.vv v25, v20, v20
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x float> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vmfne.vf v25, v28, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x float> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
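+; nxv8f64 operands each occupy a full LMUL=8 register group; the second
+; vector argument is passed indirectly here and reloaded with
+; vle64.v v8, (a0) before the compare.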
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oeq_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp oeq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oeq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ogt_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ogt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ogt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_oge_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp oge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp oge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_olt_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp olt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp olt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ole_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ole <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ole <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_one_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v8
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vmfeq.vv v27, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_one_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_one_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v16, fa0
+; CHECK-NEXT:    vmfeq.vf v26, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v26, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp one <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp one <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v25, v16, v16
+; CHECK-NEXT:    vmfeq.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ord_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v25, v16, v16
+; CHECK-NEXT:    vmfeq.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ord <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
+; CHECK-NEXT:    vmfeq.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ord <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v25, v16, v8
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vmfne.vv v27, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v8, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ueq_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
+; CHECK-NEXT:    vmfne.vf v26, v8, fa0
+; CHECK-NEXT:    vmfne.vv v27, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v26, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfeq.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ueq <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfeq.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ueq <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v25, v16, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ugt_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp ugt <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ugt <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v25, v16, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uge_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = fcmp uge <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uge <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v25, v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfge.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ult_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ult <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ult <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
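+; ule is the complement of ogt (vmflt with swapped operands, or vmfgt.vf for
+; the vector-scalar form), again inverted through vmset/vmxor.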
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmflt.vv v25, v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfgt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_ule_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmflt.vf v25, v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v26
+; CHECK-NEXT:    vmxor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp ule <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfle.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp ule <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
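+; une maps directly onto vmfne with no inversion sequence: vmfne already
+; returns true for unordered inputs, since NaN compares not-equal.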
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_une_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_une_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_une_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = fcmp une <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmfne.vf v0, v16, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp une <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
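+; uno is expanded into explicit NaN checks: vmfne of each operand against
+; itself marks its NaN lanes (x != x holds only for NaN), and vmor.mm combines
+; the two masks. For the scalar forms, the value is first splatted with
+; vfmv.v.f so its NaN check can be performed with vmfne.vf.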
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v16
+; CHECK-NEXT:    vmfne.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+; CHECK-LABEL: fcmp_uno_fv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmfne.vv v25, v16, v16
+; CHECK-NEXT:    vmfne.vv v26, v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    ret
+  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vmfne.vf v25, v8, fa0
+; CHECK-NEXT:    vmfne.vv v26, v16, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
new file mode 100644
index 0000000000000..72d1c85de1705
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+experimental-zfh,+experimental-v -target-abi ilp32d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV32V
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+experimental-zfh,+experimental-v -target-abi lp64d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV64V
+
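+; A splat built from insertelement + shufflevector lowers to a single
+; vfmv.v.f of the scalar under the appropriate vsetvli.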
+define <vscale x 8 x half> @vsplat_nxv8f16(half %f) {
+; RV32V-LABEL: vsplat_nxv8f16:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; RV32V-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; RV32V-NEXT:    vfmv.v.f v16, fa0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8f16:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; RV64V-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; RV64V-NEXT:    vfmv.v.f v16, fa0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %f, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x half> %splat
+}
+
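+; Splatting +0.0 needs no FP register: its bit pattern is all zeroes, so the
+; integer vmv.v.i with immediate 0 is used instead.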
+define <vscale x 8 x half> @vsplat_zero_nxv8f16() {
+; RV32V-LABEL: vsplat_zero_nxv8f16:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; RV32V-NEXT:    vmv.v.i v16, 0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_zero_nxv8f16:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; RV64V-NEXT:    vmv.v.i v16, 0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half zeroinitializer, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x half> %splat
+}
+
+define <vscale x 8 x float> @vsplat_nxv8f32(float %f) {
+; RV32V-LABEL: vsplat_nxv8f32:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; RV32V-NEXT:    vfmv.v.f v16, fa0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8f32:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; RV64V-NEXT:    vfmv.v.f v16, fa0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %f, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x float> %splat
+}
+
+define <vscale x 8 x float> @vsplat_zero_nxv8f32() {
+; RV32V-LABEL: vsplat_zero_nxv8f32:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; RV32V-NEXT:    vmv.v.i v16, 0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_zero_nxv8f32:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; RV64V-NEXT:    vmv.v.i v16, 0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float zeroinitializer, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x float> %splat
+}
+
+define <vscale x 8 x double> @vsplat_nxv8f64(double %f) {
+; RV32V-LABEL: vsplat_nxv8f64:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vfmv.v.f v16, fa0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_nxv8f64:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vfmv.v.f v16, fa0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %f, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x double> %splat
+}
+
+define <vscale x 8 x double> @vsplat_zero_nxv8f64() {
+; RV32V-LABEL: vsplat_zero_nxv8f64:
+; RV32V:       # %bb.0:
+; RV32V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT:    vmv.v.i v16, 0
+; RV32V-NEXT:    ret
+;
+; RV64V-LABEL: vsplat_zero_nxv8f64:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; RV64V-NEXT:    vmv.v.i v16, 0
+; RV64V-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double zeroinitializer, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x double> %splat
+}

More information about the llvm-branch-commits mailing list