[llvm] 5278cc3 - [RISCV] Support select/merge like ops for fp16 vectors when only have Zvfhmin

Jianjian GUAN via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 26 23:53:22 PDT 2023


Author: Jianjian GUAN
Date: 2023-09-27T14:53:14+08:00
New Revision: 5278cc364b1dc518920c427a87094cb0855a0758

URL: https://github.com/llvm/llvm-project/commit/5278cc364b1dc518920c427a87094cb0855a0758
DIFF: https://github.com/llvm/llvm-project/commit/5278cc364b1dc518920c427a87094cb0855a0758.diff

LOG: [RISCV] Support select/merge like ops for fp16 vectors when only have Zvfhmin

This patch supports VP_MERGE, VP_SELECT, SELECT, SELECT_CC for fp16 vectors when only have Zvfhmin.

Reviewed By: michaelmaitland

Differential Revision: https://reviews.llvm.org/D159053

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
    llvm/test/CodeGen/RISCV/rvv/select-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f6663c7f435ad70..2b8e5aeeb86405a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -822,12 +822,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
     // TODO: support more ops.
     static const unsigned ZvfhminPromoteOps[] = {
-        ISD::FMINNUM,    ISD::FMAXNUM,    ISD::FADD,         ISD::FSUB,
-        ISD::FMUL,       ISD::FMA,        ISD::FDIV,         ISD::FSQRT,
-        ISD::FABS,       ISD::FNEG,       ISD::FCOPYSIGN,    ISD::FCEIL,
-        ISD::FFLOOR,     ISD::FROUND,     ISD::FROUNDEVEN,   ISD::FRINT,
-        ISD::FNEARBYINT, ISD::IS_FPCLASS, ISD::SPLAT_VECTOR, ISD::SETCC,
-        ISD::FMAXIMUM,   ISD::FMINIMUM};
+        ISD::FMINNUM,    ISD::FMAXNUM,    ISD::FADD,       ISD::FSUB,
+        ISD::FMUL,       ISD::FMA,        ISD::FDIV,       ISD::FSQRT,
+        ISD::FABS,       ISD::FNEG,       ISD::FCOPYSIGN,  ISD::FCEIL,
+        ISD::FFLOOR,     ISD::FROUND,     ISD::FROUNDEVEN, ISD::FRINT,
+        ISD::FNEARBYINT, ISD::IS_FPCLASS, ISD::SETCC,      ISD::FMAXIMUM,
+        ISD::FMINIMUM};
 
     // TODO: support more vp ops.
     static const unsigned ZvfhminPromoteVPOps[] = {
@@ -937,12 +937,16 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
           continue;
         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
         setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
+        setOperationAction({ISD::VP_MERGE, ISD::VP_SELECT, ISD::SELECT}, VT,
+                           Custom);
+        setOperationAction(ISD::SELECT_CC, VT, Expand);
         setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP,
                             ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
                            VT, Custom);
         setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
                             ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
                            VT, Custom);
+        setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
         // load/store
         setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
 
@@ -1144,6 +1148,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
             !Subtarget.hasVInstructionsF16()) {
           setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
           setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
+          setOperationAction(
+              {ISD::VP_MERGE, ISD::VP_SELECT, ISD::VSELECT, ISD::SELECT}, VT,
+              Custom);
           setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP,
                               ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
                              VT, Custom);
@@ -1151,6 +1158,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                               ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
                              VT, Custom);
           setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
+          setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
           MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
           // Don't promote f16 vector operations to f32 if f32 vector type is
           // not legal.
@@ -5989,10 +5997,21 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::BUILD_VECTOR:
     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
   case ISD::SPLAT_VECTOR:
-    if (Op.getValueType() == MVT::nxv32f16 &&
+    if (Op.getValueType().getScalarType() == MVT::f16 &&
         (Subtarget.hasVInstructionsF16Minimal() &&
-         !Subtarget.hasVInstructionsF16()))
-      return SplitVectorOp(Op, DAG);
+         !Subtarget.hasVInstructionsF16())) {
+      if (Op.getValueType() == MVT::nxv32f16)
+        return SplitVectorOp(Op, DAG);
+      SDLoc DL(Op);
+      SDValue NewScalar =
+          DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
+      SDValue NewSplat = DAG.getNode(
+          ISD::SPLAT_VECTOR, DL,
+          MVT::getVectorVT(MVT::f32, Op.getValueType().getVectorElementCount()),
+          NewScalar);
+      return DAG.getNode(ISD::FP_ROUND, DL, Op.getValueType(), NewSplat,
+                         DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
+    }
     if (Op.getValueType().getVectorElementType() == MVT::i1)
       return lowerVectorMaskSplat(Op, DAG);
     return SDValue();

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 6d68e04987156ec..b7c8457037947c0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1358,7 +1358,8 @@ defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
 // 11.15. Vector Integer Merge Instructions
 // 13.15. Vector Floating-Point Merge Instruction
 foreach fvti = AllFloatVectors in {
-  let Predicates = GetVTypePredicates<fvti>.Predicates in {
+  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+  let Predicates = GetVTypePredicates<ivti>.Predicates in {
     def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                                           fvti.RegClass:$rs2)),
               (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
@@ -1366,6 +1367,15 @@ foreach fvti = AllFloatVectors in {
                    fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                    fvti.AVL, fvti.Log2SEW)>;
 
+    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
+                                    (SplatFPOp (fvti.Scalar fpimm0)),
+                                    fvti.RegClass:$rs2)),
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
+                   (fvti.Vector (IMPLICIT_DEF)),
+                   fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
+
+  }
+  let Predicates = GetVTypePredicates<fvti>.Predicates in 
     def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                     (SplatFPOp fvti.ScalarRegClass:$rs1),
                                     fvti.RegClass:$rs2)),
@@ -1374,14 +1384,6 @@ foreach fvti = AllFloatVectors in {
                    fvti.RegClass:$rs2,
                    (fvti.Scalar fvti.ScalarRegClass:$rs1),
                    (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
-
-    def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
-                                    (SplatFPOp (fvti.Scalar fpimm0)),
-                                    fvti.RegClass:$rs2)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
-                   (fvti.Vector (IMPLICIT_DEF)),
-                   fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
-  }
 }
 
 // 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
@@ -1422,18 +1424,18 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
 //===----------------------------------------------------------------------===//
 
 foreach fvti = AllFloatVectors in {
-  let Predicates = GetVTypePredicates<fvti>.Predicates in {
+  let Predicates = GetVTypePredicates<fvti>.Predicates in
     def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl undef, fvti.ScalarRegClass:$rs1, srcvalue)),
               (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
                 fvti.AVL, fvti.Log2SEW, TA_MA)>;
-
+  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+  let Predicates = GetVTypePredicates<ivti>.Predicates in
     def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
               (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 0, fvti.AVL, fvti.Log2SEW, TA_MA)>;
-  }
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 78b67e59082308c..fb2316202ba7f94 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2478,7 +2478,8 @@ foreach fvti = AllFloatVectors in {
   // Floating-point vselects:
   // 11.15. Vector Integer Merge Instructions
   // 13.15. Vector Floating-Point Merge Instruction
-  let Predicates = GetVTypePredicates<fvti>.Predicates in {
+  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
+  let Predicates = GetVTypePredicates<ivti>.Predicates in {
     def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                              fvti.RegClass:$rs1,
                                              fvti.RegClass:$rs2,
@@ -2488,16 +2489,6 @@ foreach fvti = AllFloatVectors in {
                    fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                    GPR:$vl, fvti.Log2SEW)>;
 
-    def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
-                                             (SplatFPOp fvti.ScalarRegClass:$rs1),
-                                             fvti.RegClass:$rs2,
-                                             VLOpFrag)),
-              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
-                   (fvti.Vector (IMPLICIT_DEF)),
-                   fvti.RegClass:$rs2,
-                   (fvti.Scalar fvti.ScalarRegClass:$rs1),
-                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
-
     def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                              (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
                                              fvti.RegClass:$rs2,
@@ -2525,21 +2516,33 @@ foreach fvti = AllFloatVectors in {
                    GPR:$vl, fvti.Log2SEW)>;
 
     def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
-                                              (SplatFPOp fvti.ScalarRegClass:$rs1),
+                                              (SplatFPOp (fvti.Scalar fpimm0)),
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
+              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
+                   fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
+                   GPR:$vl, fvti.Log2SEW)>;
+  }
+
+  let Predicates = GetVTypePredicates<fvti>.Predicates in {
+    def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
+                                             (SplatFPOp fvti.ScalarRegClass:$rs1),
+                                             fvti.RegClass:$rs2,
+                                             VLOpFrag)),
               (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
-                   fvti.RegClass:$rs2, fvti.RegClass:$rs2,
+                   (fvti.Vector (IMPLICIT_DEF)),
+                   fvti.RegClass:$rs2,
                    (fvti.Scalar fvti.ScalarRegClass:$rs1),
                    (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
 
     def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
-                                              (SplatFPOp (fvti.Scalar fpimm0)),
+                                              (SplatFPOp fvti.ScalarRegClass:$rs1),
                                               fvti.RegClass:$rs2,
                                               VLOpFrag)),
-              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
-                   fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
-                   GPR:$vl, fvti.Log2SEW)>;
+              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
+                   fvti.RegClass:$rs2, fvti.RegClass:$rs2,
+                   (fvti.Scalar fvti.ScalarRegClass:$rs1),
+                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
 
     // 13.16. Vector Floating-Point Move Instruction
     // If we're splatting fpimm0, use vmv.v.x vd, x0.

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
index 69aa12b35d24618..d945cf561698128 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
 
 define <2 x half> @select_v2f16(i1 zeroext %c, <2 x half> %a, <2 x half> %b) {
 ; CHECK-LABEL: select_v2f16:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index 86d24236c6e1eb2..6fe83fed6fd9c2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1,8 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
 
 declare <4 x i1> @llvm.vp.merge.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)
 
@@ -28,6 +32,28 @@ define <4 x i1> @vpmerge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 ze
 ; RV64-NEXT:    vmand.mm v9, v0, v9
 ; RV64-NEXT:    vmor.mm v0, v9, v8
 ; RV64-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vv_v4i1:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vid.v v10
+; RV32ZVFHMIN-NEXT:    vmsltu.vx v10, v10, a0
+; RV32ZVFHMIN-NEXT:    vmand.mm v9, v9, v10
+; RV32ZVFHMIN-NEXT:    vmandn.mm v8, v8, v9
+; RV32ZVFHMIN-NEXT:    vmand.mm v9, v0, v9
+; RV32ZVFHMIN-NEXT:    vmor.mm v0, v9, v8
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vv_v4i1:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vid.v v10
+; RV64ZVFHMIN-NEXT:    vmsltu.vx v12, v10, a0
+; RV64ZVFHMIN-NEXT:    vmand.mm v9, v9, v12
+; RV64ZVFHMIN-NEXT:    vmandn.mm v8, v8, v9
+; RV64ZVFHMIN-NEXT:    vmand.mm v9, v0, v9
+; RV64ZVFHMIN-NEXT:    vmor.mm v0, v9, v8
+; RV64ZVFHMIN-NEXT:    ret
   %v = call <4 x i1> @llvm.vp.merge.v4i1(<4 x i1> %m, <4 x i1> %va, <4 x i1> %vb, i32 %evl)
   ret <4 x i1> %v
 }
@@ -583,6 +609,26 @@ define <2 x i64> @vpmerge_vx_v2i64(i64 %a, <2 x i64> %vb, <2 x i1> %m, i32 zeroe
 ; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
 ; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vx_v2i64:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    addi sp, sp, -16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
+; RV32ZVFHMIN-NEXT:    sw a1, 12(sp)
+; RV32ZVFHMIN-NEXT:    sw a0, 8(sp)
+; RV32ZVFHMIN-NEXT:    addi a0, sp, 8
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vlse64.v v9, (a0), zero
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
+; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v9, v0
+; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vx_v2i64:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64ZVFHMIN-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %a, i32 0
   %va = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
   %v = call <2 x i64> @llvm.vp.merge.v2i64(<2 x i1> %m, <2 x i64> %va, <2 x i64> %vb, i32 %evl)
@@ -634,6 +680,26 @@ define <4 x i64> @vpmerge_vx_v4i64(i64 %a, <4 x i64> %vb, <4 x i1> %m, i32 zeroe
 ; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
 ; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vx_v4i64:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    addi sp, sp, -16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
+; RV32ZVFHMIN-NEXT:    sw a1, 12(sp)
+; RV32ZVFHMIN-NEXT:    sw a0, 8(sp)
+; RV32ZVFHMIN-NEXT:    addi a0, sp, 8
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vlse64.v v10, (a0), zero
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m2, tu, ma
+; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v10, v0
+; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vx_v4i64:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64ZVFHMIN-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %a, i32 0
   %va = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
   %v = call <4 x i64> @llvm.vp.merge.v4i64(<4 x i1> %m, <4 x i64> %va, <4 x i64> %vb, i32 %evl)
@@ -685,6 +751,26 @@ define <8 x i64> @vpmerge_vx_v8i64(i64 %a, <8 x i64> %vb, <8 x i1> %m, i32 zeroe
 ; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
 ; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vx_v8i64:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    addi sp, sp, -16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
+; RV32ZVFHMIN-NEXT:    sw a1, 12(sp)
+; RV32ZVFHMIN-NEXT:    sw a0, 8(sp)
+; RV32ZVFHMIN-NEXT:    addi a0, sp, 8
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vlse64.v v12, (a0), zero
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m4, tu, ma
+; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v12, v0
+; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vx_v8i64:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64ZVFHMIN-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %a, i32 0
   %va = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
   %v = call <8 x i64> @llvm.vp.merge.v8i64(<8 x i1> %m, <8 x i64> %va, <8 x i64> %vb, i32 %evl)
@@ -736,6 +822,26 @@ define <16 x i64> @vpmerge_vx_v16i64(i64 %a, <16 x i64> %vb, <16 x i1> %m, i32 z
 ; RV64-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
 ; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vx_v16i64:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    addi sp, sp, -16
+; RV32ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
+; RV32ZVFHMIN-NEXT:    sw a1, 12(sp)
+; RV32ZVFHMIN-NEXT:    sw a0, 8(sp)
+; RV32ZVFHMIN-NEXT:    addi a0, sp, 8
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vlse64.v v16, (a0), zero
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, tu, ma
+; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v16, v0
+; RV32ZVFHMIN-NEXT:    addi sp, sp, 16
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vx_v16i64:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; RV64ZVFHMIN-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %a, i32 0
   %va = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
   %v = call <16 x i64> @llvm.vp.merge.v16i64(<16 x i1> %m, <16 x i64> %va, <16 x i64> %vb, i32 %evl)
@@ -768,11 +874,20 @@ define <2 x half> @vpmerge_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m,
 }
 
 define <2 x half> @vpmerge_vf_v2f16(half %a, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpmerge_vf_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vpmerge_vf_v2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %a, i32 0
   %va = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
   %v = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %va, <2 x half> %vb, i32 %evl)
@@ -793,11 +908,20 @@ define <4 x half> @vpmerge_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m,
 }
 
 define <4 x half> @vpmerge_vf_v4f16(half %a, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpmerge_vf_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vpmerge_vf_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %a, i32 0
   %va = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
   %v = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %va, <4 x half> %vb, i32 %evl)
@@ -818,11 +942,20 @@ define <8 x half> @vpmerge_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m,
 }
 
 define <8 x half> @vpmerge_vf_v8f16(half %a, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpmerge_vf_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vpmerge_vf_v8f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmv.v.f v10, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %a, i32 0
   %va = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
   %v = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %va, <8 x half> %vb, i32 %evl)
@@ -843,11 +976,20 @@ define <16 x half> @vpmerge_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1
 }
 
 define <16 x half> @vpmerge_vf_v16f16(half %a, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpmerge_vf_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vpmerge_vf_v16f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfmv.v.f v12, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %a, i32 0
   %va = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
   %v = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %va, <16 x half> %vb, i32 %evl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index 2d348deb939ea80..d05f580ea7d2229 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
 
 declare <1 x i1> @llvm.vp.select.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/select-fp.ll
index db1b0ca7d8124c6..f8581d8e21b390c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/select-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/select-fp.ll
@@ -3,6 +3,10 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 define <vscale x 1 x half> @select_nxv1f16(i1 zeroext %c, <vscale x 1 x half> %a, <vscale x 1 x half> %b) {
 ; CHECK-LABEL: select_nxv1f16:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index ae39d824a62287e..2f8454983d0d6eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -1,9 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
-
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVFHMIN
 declare <vscale x 1 x i1> @llvm.vp.merge.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i1> @vpmerge_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
@@ -1084,11 +1087,35 @@ define <vscale x 1 x half> @vpmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
 }
 
 define <vscale x 1 x half> @vpmerge_vf_nxv1f16(half %a, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_nxv1f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; RV32ZVFH-LABEL: vpmerge_vf_nxv1f16:
+; RV32ZVFH:       # %bb.0:
+; RV32ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; RV32ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV32ZVFH-NEXT:    ret
+;
+; RV64ZVFH-LABEL: vpmerge_vf_nxv1f16:
+; RV64ZVFH:       # %bb.0:
+; RV64ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; RV64ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV64ZVFH-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vf_nxv1f16:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; RV32ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vf_nxv1f16:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; RV64ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %a, i32 0
   %va = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
   %v = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %va, <vscale x 1 x half> %vb, i32 %evl)
@@ -1109,11 +1136,35 @@ define <vscale x 2 x half> @vpmerge_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
 }
 
 define <vscale x 2 x half> @vpmerge_vf_nxv2f16(half %a, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; RV32ZVFH-LABEL: vpmerge_vf_nxv2f16:
+; RV32ZVFH:       # %bb.0:
+; RV32ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; RV32ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV32ZVFH-NEXT:    ret
+;
+; RV64ZVFH-LABEL: vpmerge_vf_nxv2f16:
+; RV64ZVFH:       # %bb.0:
+; RV64ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; RV64ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV64ZVFH-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vf_nxv2f16:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; RV32ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vf_nxv2f16:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; RV64ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %a, i32 0
   %va = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
   %v = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 %evl)
@@ -1134,11 +1185,35 @@ define <vscale x 4 x half> @vpmerge_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
 }
 
 define <vscale x 4 x half> @vpmerge_vf_nxv4f16(half %a, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_nxv4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; RV32ZVFH-LABEL: vpmerge_vf_nxv4f16:
+; RV32ZVFH:       # %bb.0:
+; RV32ZVFH-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; RV32ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV32ZVFH-NEXT:    ret
+;
+; RV64ZVFH-LABEL: vpmerge_vf_nxv4f16:
+; RV64ZVFH:       # %bb.0:
+; RV64ZVFH-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; RV64ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV64ZVFH-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vf_nxv4f16:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfmv.v.f v10, fa5
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; RV32ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vf_nxv4f16:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfmv.v.f v10, fa5
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; RV64ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %a, i32 0
   %va = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
   %v = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %va, <vscale x 4 x half> %vb, i32 %evl)
@@ -1159,11 +1234,35 @@ define <vscale x 8 x half> @vpmerge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
 }
 
 define <vscale x 8 x half> @vpmerge_vf_nxv8f16(half %a, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_nxv8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; RV32ZVFH-LABEL: vpmerge_vf_nxv8f16:
+; RV32ZVFH:       # %bb.0:
+; RV32ZVFH-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; RV32ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV32ZVFH-NEXT:    ret
+;
+; RV64ZVFH-LABEL: vpmerge_vf_nxv8f16:
+; RV64ZVFH:       # %bb.0:
+; RV64ZVFH-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; RV64ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV64ZVFH-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vf_nxv8f16:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfmv.v.f v12, fa5
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; RV32ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vf_nxv8f16:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfmv.v.f v12, fa5
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; RV64ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %a, i32 0
   %va = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
   %v = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %va, <vscale x 8 x half> %vb, i32 %evl)
@@ -1184,11 +1283,35 @@ define <vscale x 16 x half> @vpmerge_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
 }
 
 define <vscale x 16 x half> @vpmerge_vf_nxv16f16(half %a, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_nxv16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; RV32ZVFH-LABEL: vpmerge_vf_nxv16f16:
+; RV32ZVFH:       # %bb.0:
+; RV32ZVFH-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; RV32ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV32ZVFH-NEXT:    ret
+;
+; RV64ZVFH-LABEL: vpmerge_vf_nxv16f16:
+; RV64ZVFH:       # %bb.0:
+; RV64ZVFH-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; RV64ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV64ZVFH-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vf_nxv16f16:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; RV32ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vf_nxv16f16:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; RV64ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %a, i32 0
   %va = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
   %v = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %va, <vscale x 16 x half> %vb, i32 %evl)
@@ -1209,11 +1332,41 @@ define <vscale x 32 x half> @vpmerge_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 }
 
 define <vscale x 32 x half> @vpmerge_vf_nxv32f16(half %a, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpmerge_vf_nxv32f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; RV32ZVFH-LABEL: vpmerge_vf_nxv32f16:
+; RV32ZVFH:       # %bb.0:
+; RV32ZVFH-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; RV32ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV32ZVFH-NEXT:    ret
+;
+; RV64ZVFH-LABEL: vpmerge_vf_nxv32f16:
+; RV64ZVFH:       # %bb.0:
+; RV64ZVFH-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; RV64ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; RV64ZVFH-NEXT:    ret
+;
+; RV32ZVFHMIN-LABEL: vpmerge_vf_nxv32f16:
+; RV32ZVFHMIN:       # %bb.0:
+; RV32ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfmv.v.f v24, fa5
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfncvt.f.f.w v16, v24
+; RV32ZVFHMIN-NEXT:    vmv.v.v v20, v16
+; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; RV32ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v16, v0
+; RV32ZVFHMIN-NEXT:    ret
+;
+; RV64ZVFHMIN-LABEL: vpmerge_vf_nxv32f16:
+; RV64ZVFHMIN:       # %bb.0:
+; RV64ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfmv.v.f v24, fa5
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfncvt.f.f.w v16, v24
+; RV64ZVFHMIN-NEXT:    vmv.v.v v20, v16
+; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; RV64ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v16, v0
+; RV64ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %a, i32 0
   %va = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
   %v = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %va, <vscale x 32 x half> %vb, i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 3f96dc3f1b4f636..59280e2ec2a8af1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -1,8 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-ZVFHMIN
 
 define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_vv_nxv1f16:
@@ -15,11 +19,20 @@ define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
 }
 
 define <vscale x 1 x half> @vfmerge_fv_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv1f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; CHECK-ZVFH-LABEL: vfmerge_fv_nxv1f16:
+; CHECK-ZVFH:       # %bb.0:
+; CHECK-ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; CHECK-ZVFH-NEXT:    ret
+;
+; CHECK-ZVFHMIN-LABEL: vfmerge_fv_nxv1f16:
+; CHECK-ZVFHMIN:       # %bb.0:
+; CHECK-ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZVFHMIN-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; CHECK-ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; CHECK-ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
   %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x half> %splat, <vscale x 1 x half> %va
@@ -37,11 +50,20 @@ define <vscale x 2 x half> @vfmerge_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
 }
 
 define <vscale x 2 x half> @vfmerge_fv_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; CHECK-ZVFH-LABEL: vfmerge_fv_nxv2f16:
+; CHECK-ZVFH:       # %bb.0:
+; CHECK-ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; CHECK-ZVFH-NEXT:    ret
+;
+; CHECK-ZVFHMIN-LABEL: vfmerge_fv_nxv2f16:
+; CHECK-ZVFHMIN:       # %bb.0:
+; CHECK-ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZVFHMIN-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vfmv.v.f v9, fa5
+; CHECK-ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; CHECK-ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
   %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x half> %splat, <vscale x 2 x half> %va
@@ -59,11 +81,20 @@ define <vscale x 4 x half> @vfmerge_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
 }
 
 define <vscale x 4 x half> @vfmerge_fv_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; CHECK-ZVFH-LABEL: vfmerge_fv_nxv4f16:
+; CHECK-ZVFH:       # %bb.0:
+; CHECK-ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; CHECK-ZVFH-NEXT:    ret
+;
+; CHECK-ZVFHMIN-LABEL: vfmerge_fv_nxv4f16:
+; CHECK-ZVFHMIN:       # %bb.0:
+; CHECK-ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZVFHMIN-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vfmv.v.f v10, fa5
+; CHECK-ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; CHECK-ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; CHECK-ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
   %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x half> %splat, <vscale x 4 x half> %va
@@ -81,11 +112,20 @@ define <vscale x 8 x half> @vfmerge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
 }
 
 define <vscale x 8 x half> @vfmerge_fv_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; CHECK-ZVFH-LABEL: vfmerge_fv_nxv8f16:
+; CHECK-ZVFH:       # %bb.0:
+; CHECK-ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; CHECK-ZVFH-NEXT:    ret
+;
+; CHECK-ZVFHMIN-LABEL: vfmerge_fv_nxv8f16:
+; CHECK-ZVFHMIN:       # %bb.0:
+; CHECK-ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZVFHMIN-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vfmv.v.f v12, fa5
+; CHECK-ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; CHECK-ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
   %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %splat, <vscale x 8 x half> %va
@@ -134,11 +174,20 @@ define <vscale x 16 x half> @vfmerge_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
 }
 
 define <vscale x 16 x half> @vfmerge_fv_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; CHECK-ZVFH-LABEL: vfmerge_fv_nxv16f16:
+; CHECK-ZVFH:       # %bb.0:
+; CHECK-ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; CHECK-ZVFH-NEXT:    ret
+;
+; CHECK-ZVFHMIN-LABEL: vfmerge_fv_nxv16f16:
+; CHECK-ZVFHMIN:       # %bb.0:
+; CHECK-ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZVFHMIN-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
+; CHECK-ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
+; CHECK-ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; CHECK-ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
   %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x half> %splat, <vscale x 16 x half> %va
@@ -156,11 +205,23 @@ define <vscale x 32 x half> @vfmerge_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 }
 
 define <vscale x 32 x half> @vfmerge_fv_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv32f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
+; CHECK-ZVFH-LABEL: vfmerge_fv_nxv32f16:
+; CHECK-ZVFH:       # %bb.0:
+; CHECK-ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-ZVFH-NEXT:    vfmerge.vfm v8, v8, fa0, v0
+; CHECK-ZVFH-NEXT:    ret
+;
+; CHECK-ZVFHMIN-LABEL: vfmerge_fv_nxv32f16:
+; CHECK-ZVFHMIN:       # %bb.0:
+; CHECK-ZVFHMIN-NEXT:    fcvt.s.h fa5, fa0
+; CHECK-ZVFHMIN-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vfmv.v.f v24, fa5
+; CHECK-ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vfncvt.f.f.w v16, v24
+; CHECK-ZVFHMIN-NEXT:    vmv.v.v v20, v16
+; CHECK-ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-ZVFHMIN-NEXT:    vmerge.vvm v8, v8, v16, v0
+; CHECK-ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
   %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x half> %splat, <vscale x 32 x half> %va

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 48dccaf938fbc95..9e7df5eab8dda98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+m,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+m,+zfh,+zvfhmin,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+m,+zfh,+zvfhmin,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x i1> @llvm.vp.select.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
 


        


More information about the llvm-commits mailing list