[llvm] 8756940 - [RISCV] Peephole optimization to fold merge.vvm and unmasked intrinsics.

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 11 02:58:20 PDT 2022


Author: Yeting Kuo
Date: 2022-08-11T17:58:11+08:00
New Revision: 875694089d1f54fff41f05158733d51261b7e64c

URL: https://github.com/llvm/llvm-project/commit/875694089d1f54fff41f05158733d51261b7e64c
DIFF: https://github.com/llvm/llvm-project/commit/875694089d1f54fff41f05158733d51261b7e64c.diff

LOG: [RISCV] Peephole optimization to fold merge.vvm and unmasked intrinsics.

The patch uses a peephole optimization to fold merge.vvm and unmasked intrinsics
into masked intrinsics. A peephole is used instead of tablegen patterns to avoid
a large amount of auto-generated code.

Note: The patch ignores segment loads since I don't know how to test them.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D130442
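
For illustration, below is a minimal standalone C++ sketch of the operand
rewrite the peephole performs. It does not use LLVM's SelectionDAG API; the
ToyNode struct and every name in it are made up for this example. Only the
operand ordering (merge operand first, then the unmasked op's operands up to
its VL, then mask, VL, SEW, and a TUMU policy) mirrors the actual fold done in
doPeepholeMergeVVMFold below.

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy stand-in for a machine SDNode: an opcode plus named operands.
    struct ToyNode {
      std::string Opcode;
      std::vector<std::string> Ops;
    };

    // Given VMERGE_VVM_*_TU (Merge, False, True, Mask, VL) with Merge == False,
    // and True an unmasked pseudo whose operands end with ..., VL, SEW, build
    // the operand list of the corresponding masked pseudo.
    ToyNode foldMergeIntoMasked(const ToyNode &VMerge, const ToyNode &True,
                                unsigned TrueVLIndex) {
      assert(VMerge.Ops[0] == VMerge.Ops[1] && "peephole needs Merge == False");
      ToyNode Masked;
      Masked.Opcode = True.Opcode + "_MASK";   // e.g. PseudoVADD_VV_M1_MASK
      Masked.Ops.push_back(VMerge.Ops[0]);     // merge (tail) operand
      // Data operands of the unmasked op, up to but not including its VL.
      Masked.Ops.insert(Masked.Ops.end(), True.Ops.begin(),
                        True.Ops.begin() + TrueVLIndex);
      Masked.Ops.push_back(VMerge.Ops[3]);             // mask from the vmerge
      Masked.Ops.push_back(VMerge.Ops[4]);             // shared VL
      Masked.Ops.push_back(True.Ops[TrueVLIndex + 1]); // SEW
      Masked.Ops.push_back("policy=0");                // TUMU
      return Masked;
    }

    int main() {
      ToyNode VMerge{"PseudoVMERGE_VVM_M1_TU", {"v8", "v8", "add", "v0", "a0"}};
      ToyNode Add{"PseudoVADD_VV_M1", {"v9", "v10", "a0", "e32"}};
      ToyNode Masked = foldMergeIntoMasked(VMerge, Add, /*TrueVLIndex=*/2);
      // Masked: PseudoVADD_VV_M1_MASK v8, v9, v10, v0, a0, e32, policy=0
      (void)Masked;
      return 0;
    }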

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 5d00919dfa783..5dcfaaf6a5b52 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -151,6 +151,8 @@ void RISCVDAGToDAGISel::PostprocessISelDAG() {
 
   CurDAG->setRoot(Dummy.getValue());
 
+  MadeChange |= doPeepholeMergeVVMFold();
+
   if (MadeChange)
     CurDAG->RemoveDeadNodes();
 }
@@ -2598,6 +2600,118 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
   return true;
 }
 
+// Try to fold a VMERGE_VVM with an unmasked intrinsic into a masked intrinsic.
+// The peephole currently only deals with a VMERGE_VVM that is TU and whose
+// false operand is the same as its merge operand. E.g.
+// (VMERGE_VVM_M1_TU False, False, (VADD_M1 ...), ...) -> (VADD_VV_M1_MASK)
+bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
+  bool MadeChange = false;
+  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
+
+  while (Position != CurDAG->allnodes_begin()) {
+    SDNode *N = &*--Position;
+    if (N->use_empty() || !N->isMachineOpcode())
+      continue;
+
+    auto IsVMergeTU = [](unsigned Opcode) {
+      return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
+             Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
+    };
+
+    unsigned Opc = N->getMachineOpcode();
+    // TODO: Also deal with TA VMerge nodes.
+    if (!IsVMergeTU(Opc))
+      continue;
+
+    SDValue Merge = N->getOperand(0);
+    SDValue False = N->getOperand(1);
+    SDValue True = N->getOperand(2);
+    SDValue Mask = N->getOperand(3);
+    SDValue VL = N->getOperand(4);
+
+    if (Merge != False)
+      continue;
+
+    assert(True.getResNo() == 0 &&
+           "Expect True to be the first output of an instruction.");
+
+    // N must be the only user of True.
+    if (!True.hasOneUse())
+      continue;
+
+    if (!True.isMachineOpcode())
+      continue;
+
+    unsigned TrueOpc = True.getMachineOpcode();
+
+    // Skip if True has a merge operand.
+    // TODO: Deal with True having the same merge operand as N.
+    if (RISCVII::hasMergeOp(TII->get(TrueOpc).TSFlags))
+      continue;
+
+    // Skip if True has side effects.
+    // TODO: Support vleff and vlsegff.
+    if (TII->get(TrueOpc).hasUnmodeledSideEffects())
+      continue;
+
+    // For now, only deal with True when it is an unmasked intrinsic.
+    const RISCV::RISCVMaskedPseudoInfo *Info =
+        RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);
+
+    if (!Info)
+      continue;
+
+    // The last operand of an unmasked intrinsic should be SEW or the chain.
+    bool HasChainOp =
+        True.getOperand(True.getNumOperands() - 1).getValueType() == MVT::Other;
+
+    // True must have the same VL as N.
+    unsigned TrueVLIndex = True.getNumOperands() - HasChainOp - 2;
+    SDValue TrueVL = True.getOperand(TrueVLIndex);
+    if (TrueVL != VL)
+      continue;
+
+    SDLoc DL(N);
+    unsigned MaskedOpc = Info->MaskedPseudo;
+    SmallVector<SDValue, 8> Ops;
+    Ops.push_back(Merge);
+    Ops.append(True->op_begin(), True->op_begin() + TrueVLIndex);
+    Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+
+    if (RISCVII::hasVecPolicyOp(TII->get(MaskedOpc).TSFlags))
+      Ops.push_back(
+          CurDAG->getTargetConstant(/* TUMU */ 0, DL, Subtarget->getXLenVT()));
+
+    // The result node should take the chain operand of True.
+    if (HasChainOp)
+      Ops.push_back(True.getOperand(True.getNumOperands() - 1));
+
+    // The result node should take over the glued node of N.
+    if (N->getGluedNode())
+      Ops.push_back(N->getOperand(N->getNumOperands() - 1));
+
+    SDNode *Result =
+        CurDAG->getMachineNode(MaskedOpc, DL, True->getVTList(), Ops);
+
+    // Replace the vmerge.vvm node with Result.
+    ReplaceUses(SDValue(N, 0), SDValue(Result, 0));
+
+    // Replace the other values of True, e.g. the chain and VL.
+    for (unsigned Idx = 1; Idx < True->getNumValues(); ++Idx)
+      ReplaceUses(True.getValue(Idx), SDValue(Result, Idx));
+
+    // Try to transform Result to unmasked intrinsic.
+    doPeepholeMaskedRVV(Result);
+    MadeChange = true;
+  }
+  return MadeChange;
+}
+
 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
 // for instruction scheduling.
 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,

diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 89500747992f9..cd8064bc056ce 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -132,6 +132,7 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
 private:
   bool doPeepholeSExtW(SDNode *Node);
   bool doPeepholeMaskedRVV(SDNode *Node);
+  bool doPeepholeMergeVVMFold();
 };
 
 namespace RISCV {

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index a1071096a4d11..03e77c78692a5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -472,6 +472,11 @@ class RISCVVLE<bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> {
   Pseudo Pseudo = !cast<Pseudo>(NAME);
 }
 
+def lookupMaskedIntrinsicByUnmaskedTA : SearchIndex {
+  let Table = RISCVMaskedPseudosTable;
+  let Key = ["UnmaskedPseudo"];
+}
+
 def RISCVVLETable : GenericTable {
   let FilterClass = "RISCVVLE";
   let CppTypeName = "VLEPseudo";
@@ -1591,6 +1596,7 @@ multiclass VPseudoUSLoad {
           VLESched<eew>;
         def "E" # eew # "_V_" # LInfo # "_MASK" :
           VPseudoUSLoadMask<vreg, eew>,
+          RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
           VLESched<eew>;
       }
     }
@@ -1611,6 +1617,7 @@ multiclass VPseudoFFLoad {
           VLFSched<eew>;
         def "E" # eew # "FF_V_" # LInfo # "_MASK":
           VPseudoUSLoadFFMask<vreg, eew>,
+          RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
           VLFSched<eew>;
       }
     }
@@ -1635,8 +1642,10 @@ multiclass VPseudoSLoad {
                                         VLSSched<eew>;
         def "E" # eew # "_V_" # LInfo # "_TU": VPseudoSLoadNoMaskTU<vreg, eew>,
                                         VLSSched<eew>;
-        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>,
-                                                  VLSSched<eew>;
+        def "E" # eew # "_V_" # LInfo # "_MASK" :
+          VPseudoSLoadMask<vreg, eew>,
+          RISCVMaskedPseudo</*MaskOpIdx*/ 3>,
+          VLSSched<eew>;
       }
     }
   }
@@ -1666,6 +1675,7 @@ multiclass VPseudoILoad<bit Ordered> {
               VLXSched<eew, Order>;
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
               VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
+              RISCVMaskedPseudo</*MaskOpIdx*/ 3>,
               VLXSched<eew, Order>;
           }
         }
@@ -1785,6 +1795,7 @@ multiclass VPseudoVID_V {
       def "_V_" # m.MX # "_TU": VPseudoNullaryNoMaskTU<m.vrclass>,
                                 Sched<[WriteVMIdxV, ReadVMask]>;
       def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>,
+                                   RISCVMaskedPseudo</*MaskOpIdx*/ 1>,
                                    Sched<[WriteVMIdxV, ReadVMask]>;
     }
   }
@@ -1807,6 +1818,7 @@ multiclass VPseudoVIOT_M {
       def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU<m.vrclass, VR, constraint>,
                                Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
       def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, VR, constraint>,
+                                 RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                  Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
     }
   }
@@ -2137,6 +2149,7 @@ multiclass VPseudoVCLS_V {
       def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
                                 Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
       def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
+                                   RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                    Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
     }
   }
@@ -2150,6 +2163,7 @@ multiclass VPseudoVSQR_V {
       def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
                                 Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
       def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
+                                   RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                    Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
     }
   }
@@ -2163,6 +2177,7 @@ multiclass VPseudoVRCP_V {
       def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
                                 Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
       def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
+                                   RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
                                    Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
     }
   }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
index 36e31aa049dfd..d1c3f0a77abc1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
@@ -1,5 +1,5 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=256 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=256 -stop-after=finalize-isel | FileCheck %s --check-prefix=MIR
 
 declare <8 x i16> @llvm.vp.merge.nxv2i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
 declare <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
@@ -11,11 +11,22 @@ declare <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
 define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpadd:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpadd
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVADD_VV_M1_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
@@ -29,11 +40,23 @@ define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y
 ; CHECK-LABEL: vpmerge_vpadd2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v11, v9, v10
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v11, v0
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpadd2
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   [[PseudoVMSEQ_VV_M1_:%[0-9]+]]:vr = PseudoVMSEQ_VV_M1 [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v0 = COPY [[PseudoVMSEQ_VV_M1_]]
+  ; MIR-NEXT:   [[PseudoVADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVADD_VV_M1_MASK [[COPY3]], [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
@@ -46,13 +69,20 @@ define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y
 define <8 x i32> @vpmerge_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpadd3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf4, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpadd3
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
+  ; MIR-NEXT:   [[PseudoVADD_VV_M1_TU:%[0-9]+]]:vr = PseudoVADD_VV_M1_TU [[COPY3]], [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_TU]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
@@ -65,11 +95,22 @@ declare <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float>, <8 x float>, <8 x i1>, i3
 define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x float> %y, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfadd:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfadd
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVFADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVFADD_VV_M1_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0, implicit $frm
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVFADD_VV_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
@@ -82,11 +123,21 @@ declare <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float>, <8 x i1>, i32)
 define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfptosi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfptosi
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVFNCVT_RTZ_X_F_W_MF2_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 4 /* e16 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
@@ -99,11 +150,21 @@ declare <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64>, <8 x i1>, i32)
 define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpsitofp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpsitofp
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v10m2, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVFNCVT_F_X_W_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
@@ -116,11 +177,21 @@ declare <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8>, <8 x i1>, i32)
 define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpzext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vzext.vf4 v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpzext
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVZEXT_VF4_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
@@ -133,11 +204,21 @@ declare <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64>, <8 x i1>, i32)
 define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vptrunc:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vncvt.x.x.w v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vptrunc
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v10m2, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVNSRL_WX_M1_MASK [[COPY3]], [[COPY2]], $x0, $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
@@ -150,11 +231,21 @@ declare <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float>, <8 x i1>, i32)
 define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfpext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v12, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfpext
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8m2, $v10, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrm2nov0 = PseudoVFWCVT_F_F_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8m2 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8m2
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
@@ -167,11 +258,21 @@ declare <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double>, <8 x i1>, i32
 define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfptrunc:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfptrunc
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v10m2, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVFNCVT_F_F_W_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
@@ -184,11 +285,21 @@ declare <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> *, <8 x i1>, i32)
 define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, <8 x i32> * %p, <8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpload:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpload
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVLE32_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)
@@ -201,11 +312,24 @@ define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32>
 ; CHECK-LABEL: vpmerge_vpload2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v11, (a0)
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v11, v0
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpload2
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v9, $v10, $x11
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   [[PseudoVMSEQ_VV_M1_:%[0-9]+]]:vr = PseudoVMSEQ_VV_M1 [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v0 = COPY [[PseudoVMSEQ_VV_M1_]]
+  ; MIR-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY4]], [[COPY3]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVLE32_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <8 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
   %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 4268b1be6ed3d..122e2d50c7c30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -1,5 +1,5 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel | FileCheck %s --check-prefix=MIR
 
 declare <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
 declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
@@ -11,11 +11,22 @@ declare <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32>, <vscale x 2
 define <vscale x 2 x i32> @vpmerge_vpadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpadd:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpadd
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVADD_VV_M1_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
@@ -29,11 +40,23 @@ define <vscale x 2 x i32> @vpmerge_vpadd2(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-LABEL: vpmerge_vpadd2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v11, v9, v10
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v11, v0
+; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpadd2
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   [[PseudoVMSEQ_VV_M1_:%[0-9]+]]:vr = PseudoVMSEQ_VV_M1 [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v0 = COPY [[PseudoVMSEQ_VV_M1_]]
+  ; MIR-NEXT:   [[PseudoVADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVADD_VV_M1_MASK [[COPY3]], [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
@@ -42,17 +65,24 @@ define <vscale x 2 x i32> @vpmerge_vpadd2(<vscale x 2 x i32> %passthru, <vscale
   ret <vscale x 2 x i32> %b
 }
 
-; Test vp.merge have all-ones mask.
+; Test vp.merge has all-ones mask.
 define <vscale x 2 x i32> @vpmerge_vpadd3(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpadd3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpadd3
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
+  ; MIR-NEXT:   [[PseudoVADD_VV_M1_TU:%[0-9]+]]:vr = PseudoVADD_VV_M1_TU [[COPY3]], [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_TU]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
@@ -65,11 +95,22 @@ declare <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float>, <vscale
 define <vscale x 2 x float> @vpmerge_vpfadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfadd:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfadd
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v10, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVFADD_VV_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVFADD_VV_M1_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0, implicit $frm
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVFADD_VV_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
@@ -85,6 +126,16 @@ define <vscale x 2 x i32> @vpmerge_vrgatherei16(<vscale x 2 x i32> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vrgatherei16
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v9, $v10, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   early-clobber %5:vr = PseudoVRGATHEREI16_VV_M1_MF2 [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v8 = COPY %5
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %2 = tail call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, i64 %1)
   %3 = tail call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %2, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -96,11 +147,21 @@ declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>,
 define <vscale x 2 x i16> @vpmerge_vpfptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfptosi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfptosi
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVFNCVT_RTZ_X_F_W_MF2_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 4 /* e16 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
@@ -113,11 +174,21 @@ declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>,
 define <vscale x 2 x float> @vpmerge_vpsitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpsitofp:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.x.w v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpsitofp
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v10m2, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVFNCVT_F_X_W_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
@@ -130,11 +201,21 @@ declare <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8>, <vsca
 define <vscale x 2 x i32> @vpmerge_vpzext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpzext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vzext.vf4 v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpzext
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVZEXT_VF4_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
@@ -147,11 +228,21 @@ declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <v
 define <vscale x 2 x i32> @vpmerge_vptrunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vptrunc:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vncvt.x.x.w v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vptrunc
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v10m2, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVNSRL_WX_M1_MASK [[COPY3]], [[COPY2]], $x0, $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
@@ -164,11 +255,21 @@ declare <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float
 define <vscale x 2 x double> @vpmerge_vpfpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfpext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfwcvt.f.f.v v12, v10
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfpext
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8m2, $v10, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrm2nov0 = PseudoVFWCVT_F_F_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8m2 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8m2
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
@@ -181,11 +282,21 @@ declare <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x doub
 define <vscale x 2 x float> @vpmerge_vpfptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpfptrunc:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfncvt.f.f.w v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpfptrunc
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v10m2, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVFNCVT_F_F_W_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
@@ -198,11 +309,21 @@ declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> *,
 define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpload:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpload
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVLE32_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
@@ -210,16 +331,29 @@ define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, <vscale
   ret <vscale x 2 x i32> %b
 }
 
-; Test result have chain and glued node.
+; Test result has chain and glued node.
 define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vpload2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v11, (a0)
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v11, v0
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpload2
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v9, $v10, $x11
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v10
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   [[PseudoVMSEQ_VV_M1_:%[0-9]+]]:vr = PseudoVMSEQ_VV_M1 [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v0 = COPY [[PseudoVMSEQ_VV_M1_]]
+  ; MIR-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY4]], [[COPY3]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVLE32_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
@@ -228,6 +362,34 @@ define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, <vscale
   ret <vscale x 2 x i32> %b
 }
 
+; Test result has chain output of true operand of merge.vvm.
+define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vpmerge_vpload_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    vs1r.v v8, (a0)
+; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vpload_store
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+  ; MIR-NEXT:   PseudoRET
+  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
+  ret void
+}
+
 ; FIXME: Merge vmerge.vvm and vleffN.v
 declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64)
 define <vscale x 2 x i32> @vpmerge_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
@@ -238,6 +400,19 @@ define <vscale x 2 x i32> @vpmerge_vleff(<vscale x 2 x i32> %passthru, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vleff
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE32FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE32FF_V_M1 [[COPY2]], [[COPY]], 5 /* e32 */, implicit-def dead $vl
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVMERGE_VVM_M1_TU:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1_TU [[COPY3]], [[COPY3]], killed [[PseudoVLE32FF_V_M1_]], $v0, [[COPY]], 5 /* e32 */
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVMERGE_VVM_M1_TU]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
   %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
@@ -250,11 +425,22 @@ declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32>, <vscale
 define <vscale x 2 x i32> @vpmerge_vlse(<vscale x 2 x i32> %passthru,  <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vlse:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), a1
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
+; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vlse
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v0, $x11, $x12
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY2]]
+  ; MIR-NEXT:   [[PseudoVLSE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE32_V_M1_MASK [[COPY4]], [[COPY3]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVLSE32_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -266,11 +452,22 @@ declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32>
 define <vscale x 2 x i32> @vpmerge_vluxei(<vscale x 2 x i32> %passthru,  <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vluxei:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v9, (a0), v10
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vluxei
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $x10, $v10m2, $v0, $x12
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; MIR-NEXT:   [[COPY4:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   early-clobber %6:vrnov0 = PseudoVLUXEI64_V_M2_M1_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %6
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -282,11 +479,20 @@ declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32>, i64)
 define <vscale x 2 x i32> @vpmerge_vid(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vid:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vid.v v8, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vid
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVID_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVID_V_M1_MASK [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVID_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32> undef, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -298,11 +504,21 @@ declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32>, <vscale
 define <vscale x 2 x i32> @vpmerge_viota(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, <vscale x 2 x i1> %vm, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_viota:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    viota.m v10, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    viota.m v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_viota
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v0, $v9, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY2]]
+  ; MIR-NEXT:   early-clobber %4:vrnov0 = PseudoVIOTA_M_M1_MASK [[COPY3]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY %4
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -314,11 +530,21 @@ declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32>, <vsca
 define <vscale x 2 x i32> @vpmerge_vflcass(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vflcass:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfclass.v v9, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfclass.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vflcass
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVFCLASS_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVFCLASS_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVFCLASS_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x float> %vf, i64 %1)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -330,11 +556,21 @@ declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float>, <v
 define <vscale x 2 x float> @vpmerge_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vfsqrt:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfsqrt.v v9, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vfsqrt
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVFSQRT_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVFSQRT_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0, implicit $frm
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVFSQRT_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
   %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
@@ -346,11 +582,21 @@ declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float>, <v
 define <vscale x 2 x float> @vpmerge_vfrec7(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vpmerge_vfrec7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vfrec7.v v9, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
-; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
+  ; MIR-LABEL: name: vpmerge_vfrec7
+  ; MIR: bb.0 (%ir-block.0):
+  ; MIR-NEXT:   liveins: $v8, $v9, $v0, $x10
+  ; MIR-NEXT: {{  $}}
+  ; MIR-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+  ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v9
+  ; MIR-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; MIR-NEXT:   $v0 = COPY [[COPY1]]
+  ; MIR-NEXT:   [[PseudoVFREC7_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVFREC7_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0, implicit $frm
+  ; MIR-NEXT:   $v8 = COPY [[PseudoVFREC7_V_M1_MASK]]
+  ; MIR-NEXT:   PseudoRET implicit $v8
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
   %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)


        

