[llvm] c653711 - [RISCV] Teach vsetvli insertion pass that operations on masks don't care about SEW/LMUL.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 4 09:18:32 PDT 2021


Author: Craig Topper
Date: 2021-06-04T09:17:46-07:00
New Revision: c653711fd3a9ac8399a435f02054ef42cddc4db7

URL: https://github.com/llvm/llvm-project/commit/c653711fd3a9ac8399a435f02054ef42cddc4db7
DIFF: https://github.com/llvm/llvm-project/commit/c653711fd3a9ac8399a435f02054ef42cddc4db7.diff

LOG: [RISCV] Teach vsetvli insertion pass that operations on masks don't care about SEW/LMUL.

All that really matters is that the VLMAX of the preceding
instructions is the same as the VLMAX required by the mask
operation.

Also update the vmsge(u) handling to use the SEW/LMUL we use for
other mask register operations. We were matching it to the compare
before. Some cases will be improved if we fix masked compares to
use tail agnostic policy. I think they ignore the tail policy
anyway.
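
For illustration (not part of the patch): VLMAX depends only on the
SEW:LMUL ratio, so two vtypes with different SEW/LMUL pairs can still agree
on VLMAX. A minimal C++ sketch of that invariant, assuming an LMUL encoding
in eighths (MF8 = 1, M1 = 8, M8 = 64) so fractional LMULs stay integral:

  // VLMAX = VLEN * LMUL / SEW; LMulEighths is LMUL scaled by 8.
  static unsigned getVLMax(unsigned VLenBits, unsigned SEW,
                           unsigned LMulEighths) {
    return (VLenBits * LMulEighths) / (SEW * 8);
  }
  // getVLMax(128, /*SEW=*/16, /*m2=*/16) == 16   (e16,m2)
  // getVLMax(128, /*SEW=*/8,  /*m1=*/8)  == 16   (e8,m1)
  // Same VLMAX, so no vsetvli is needed between a compare and a mask op.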

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D103299

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
    llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index c97e0d4895da..df234d3c1358 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -596,60 +596,80 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
                                      : RISCV::PseudoVMSLT_VX_MF8_MASK;
-        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
-        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
         break;
       case RISCVII::VLMUL::LMUL_F4:
         VMSLTOpcode =
             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
                                      : RISCV::PseudoVMSLT_VX_MF4_MASK;
-        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
-        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
         break;
       case RISCVII::VLMUL::LMUL_F2:
         VMSLTOpcode =
             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
                                      : RISCV::PseudoVMSLT_VX_MF2_MASK;
-        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
-        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
         break;
       case RISCVII::VLMUL::LMUL_1:
         VMSLTOpcode =
             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
                                      : RISCV::PseudoVMSLT_VX_M1_MASK;
-        VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
-        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
         break;
       case RISCVII::VLMUL::LMUL_2:
         VMSLTOpcode =
             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
                                      : RISCV::PseudoVMSLT_VX_M2_MASK;
-        VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
-        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
         break;
       case RISCVII::VLMUL::LMUL_4:
         VMSLTOpcode =
             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
                                      : RISCV::PseudoVMSLT_VX_M4_MASK;
-        VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
-        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
         break;
       case RISCVII::VLMUL::LMUL_8:
         VMSLTOpcode =
             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
                                      : RISCV::PseudoVMSLT_VX_M8_MASK;
+        break;
+      }
+      // Mask operations use the LMUL from the mask type.
+      switch (RISCVTargetLowering::getLMUL(VT)) {
+      default:
+        llvm_unreachable("Unexpected LMUL!");
+      case RISCVII::VLMUL::LMUL_F8:
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
+        break;
+      case RISCVII::VLMUL::LMUL_F4:
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
+        break;
+      case RISCVII::VLMUL::LMUL_F2:
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
+        break;
+      case RISCVII::VLMUL::LMUL_1:
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
+        break;
+      case RISCVII::VLMUL::LMUL_2:
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
+        break;
+      case RISCVII::VLMUL::LMUL_4:
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
+        break;
+      case RISCVII::VLMUL::LMUL_8:
         VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
         break;
       }
       SDValue SEW = CurDAG->getTargetConstant(
           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
+      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
       SDValue VL;
       selectVLOp(Node->getOperand(5), VL);
       SDValue MaskedOff = Node->getOperand(1);
@@ -662,7 +682,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
             0);
         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
-                                                 {Mask, Cmp, VL, SEW}));
+                                                 {Mask, Cmp, VL, MaskSEW}));
         return;
       }
 
@@ -673,7 +693,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
                                  {MaskedOff, Src1, Src2, Mask, VL, SEW}),
           0);
       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
-                                               {Cmp, Mask, VL, SEW}));
+                                               {Cmp, Mask, VL, MaskSEW}));
       return;
     }
     }
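
A note on the MaskSEW operand introduced above: the SEW operand on these
pseudos carries Log2SEW, and since no real element width gives a Log2SEW of
0 (the minimum SEW of 8 encodes as 3), 0 is free to mark mask-register
operations. A simplified restatement of the decoding performed by the
insertion pass in the next file:

  // Log2SEW == 0 flags a mask-register-only operation; such operations are
  // still assigned SEW=8 so the encoded VTYPE remains valid.
  bool MaskRegOp = (Log2SEW == 0);
  unsigned SEW = MaskRegOp ? 8 : (1u << Log2SEW);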

diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 48124160aa26..7121061258f1 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -55,11 +55,13 @@ class VSETVLIInfo {
   // Fields from VTYPE.
   RISCVII::VLMUL VLMul = RISCVII::LMUL_1;
   uint8_t SEW = 0;
-  bool TailAgnostic = false;
-  bool MaskAgnostic = false;
+  uint8_t TailAgnostic : 1;
+  uint8_t MaskAgnostic : 1;
+  uint8_t MaskRegOp : 1;
 
 public:
-  VSETVLIInfo() : AVLImm(0) {}
+  VSETVLIInfo()
+      : AVLImm(0), TailAgnostic(false), MaskAgnostic(false), MaskRegOp(false) {}
 
   static VSETVLIInfo getUnknown() {
     VSETVLIInfo Info;
@@ -114,13 +116,14 @@ class VSETVLIInfo {
     TailAgnostic = RISCVVType::isTailAgnostic(VType);
     MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
   }
-  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA) {
+  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO) {
     assert(isValid() && !isUnknown() &&
            "Can't set VTYPE for uninitialized or unknown");
     VLMul = L;
     SEW = S;
     TailAgnostic = TA;
     MaskAgnostic = MA;
+    MaskRegOp = MRO;
   }
 
   unsigned encodeVTYPE() const {
@@ -163,21 +166,34 @@ class VSETVLIInfo {
     return getSEWLMULRatio() == Other.getSEWLMULRatio();
   }
 
-  bool isCompatible(const VSETVLIInfo &Other) const {
-    assert(isValid() && Other.isValid() &&
+  // Determine whether the requirements of the vector instruction represented
+  // by InstrInfo are compatible with the previous vsetvli instruction
+  // represented by this object.
+  bool isCompatible(const VSETVLIInfo &InstrInfo) const {
+    assert(isValid() && InstrInfo.isValid() &&
            "Can't compare invalid VSETVLIInfos");
     // Nothing is compatible with Unknown.
-    if (isUnknown() || Other.isUnknown())
+    if (isUnknown() || InstrInfo.isUnknown())
       return false;
 
-    // If other doesn't need an AVLReg and the SEW matches, consider it
-    // compatible.
-    if (Other.hasAVLReg() && Other.AVLReg == RISCV::NoRegister) {
-      if (SEW == Other.SEW)
+    // If the instruction doesn't need an AVLReg and the SEW matches, consider
+    // it compatible.
+    if (InstrInfo.hasAVLReg() && InstrInfo.AVLReg == RISCV::NoRegister) {
+      if (SEW == InstrInfo.SEW)
         return true;
     }
 
-    return hasSameVTYPE(Other) && hasSameAVL(Other);
+    // VTypes must match unless the instruction is a mask reg operation, in
+    // which case it only cares about VLMAX.
+    // FIXME: Mask reg operations are probably ok if "this" VLMAX is larger
+    // than "InstrInfo".
+    if (!hasSameVTYPE(InstrInfo) &&
+        !(InstrInfo.MaskRegOp && hasSameVLMAX(InstrInfo) &&
+          TailAgnostic == InstrInfo.TailAgnostic &&
+          MaskAgnostic == InstrInfo.MaskAgnostic))
+      return false;
+
+    return hasSameAVL(InstrInfo);
   }
 
   bool operator==(const VSETVLIInfo &Other) const {
@@ -316,7 +332,9 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
   RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
 
   unsigned Log2SEW = MI.getOperand(NumOperands - 1).getImm();
-  unsigned SEW = 1 << Log2SEW;
+  // A Log2SEW of 0 denotes an operation on mask registers only.
+  bool MaskRegOp = Log2SEW == 0;
+  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
 
   // Default to tail agnostic unless the destination is tied to a source.
@@ -350,7 +368,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
   } else
     InstrInfo.setAVLReg(RISCV::NoRegister);
   InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
-                     /*MaskAgnostic*/ false);
+                     /*MaskAgnostic*/ false, MaskRegOp);
 
   return InstrInfo;
 }
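
With the MaskRegOp flag in place, the new compatibility rule above can be
restated as the following predicate (a simplified sketch, not the exact
code; Prev is the state established by the prior vsetvli, Instr the
candidate instruction):

  // A mask-only op is compatible when VLMAX (the SEW:LMUL ratio) and the
  // tail/mask policies match, even if SEW and LMUL individually differ.
  bool VTypeOK = Prev.hasSameVTYPE(Instr) ||
                 (Instr.MaskRegOp && Prev.hasSameVLMAX(Instr) &&
                  Prev.TailAgnostic == Instr.TailAgnostic &&
                  Prev.MaskAgnostic == Instr.MaskAgnostic);
  bool Compatible = VTypeOK && Prev.hasSameAVL(Instr);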

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 9e5720cfe516..75cdefd461fb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -267,9 +267,10 @@ class GetIntVTypeInfo<VTypeInfo vti>
 class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
   ValueType Mask = Mas;
   // {SEW, VLMul} values set a valid VType to deal with this mask type.
-  // we assume SEW=8 and set corresponding LMUL.
-  int SEW = 8;
-  int Log2SEW = 3;
+  // We assume SEW=1 and set the corresponding LMUL. vsetvli insertion will
+  // look for SEW=1 to optimize based on surrounding instructions.
+  int SEW = 1;
+  int Log2SEW = 0;
   LMULInfo LMul = M;
   string BX = Bx; // Appendix of mask operations.
   // The pattern fragment which produces the AVL operand, representing the

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
index 619ebb74b3e9..879a3ff8f23c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -9,7 +9,6 @@ define void @fcmp_oeq_vv_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmfeq.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -26,7 +25,6 @@ define void @fcmp_oeq_vv_v8f16_nonans(<8 x half>* %x, <8 x half>* %y, <8 x i1>*
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmfeq.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -143,7 +141,6 @@ define void @fcmp_olt_vv_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z)
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v26, v28
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -160,7 +157,6 @@ define void @fcmp_olt_vv_v16f16_nonans(<16 x half>* %x, <16 x half>* %y, <16 x i
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v26, v28
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -177,7 +173,6 @@ define void @fcmp_oge_vv_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v28, v26
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -194,7 +189,6 @@ define void @fcmp_oge_vv_v8f32_nonans(<8 x float>* %x, <8 x float>* %y, <8 x i1>
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v28, v26
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -262,7 +256,6 @@ define void @fcmp_ule_vv_v32f16(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z)
 ; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v8, v28
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -281,7 +274,6 @@ define void @fcmp_ule_vv_v32f16_nonans(<32 x half>* %x, <32 x half>* %y, <32 x i
 ; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v28, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
@@ -298,7 +290,6 @@ define void @fcmp_uge_vv_v16f32(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v28, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -316,7 +307,6 @@ define void @fcmp_uge_vv_v16f32_nonans(<16 x float>* %x, <16 x float>* %y, <16 x
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v8, v28
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -333,7 +323,6 @@ define void @fcmp_ult_vv_v8f64(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z)
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v8, v28
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -351,7 +340,6 @@ define void @fcmp_ult_vv_v8f64_nonans(<8 x double>* %x, <8 x double>* %y, <8 x i
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v28, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -369,7 +357,6 @@ define void @fcmp_ugt_vv_v64f16(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z)
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v16, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -388,7 +375,6 @@ define void @fcmp_ugt_vv_v64f16_nonans(<64 x half>* %x, <64 x half>* %y, <64 x i
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v16, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
@@ -407,7 +393,6 @@ define void @fcmp_ueq_vv_v32f32(<32 x float>* %x, <32 x float>* %y, <32 x i1>* %
 ; CHECK-NEXT:    vle32.v v16, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
 ; CHECK-NEXT:    vmflt.vv v26, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -426,7 +411,6 @@ define void @fcmp_ueq_vv_v32f32_nonans(<32 x float>* %x, <32 x float>* %y, <32 x
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vle32.v v16, (a1)
 ; CHECK-NEXT:    vmfeq.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
@@ -444,7 +428,6 @@ define void @fcmp_one_vv_v8f64(<16 x double>* %x, <16 x double>* %y, <16 x i1>*
 ; CHECK-NEXT:    vle64.v v16, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
 ; CHECK-NEXT:    vmflt.vv v26, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -462,7 +445,6 @@ define void @fcmp_one_vv_v8f64_nonans(<16 x double>* %x, <16 x double>* %y, <16
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vle64.v v16, (a1)
 ; CHECK-NEXT:    vmfne.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -480,8 +462,8 @@ define void @fcmp_ord_vv_v4f16(<4 x half>* %x, <4 x half>* %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfeq.vv v25, v25, v25
 ; CHECK-NEXT:    vmfeq.vv v26, v26, v26
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -507,8 +489,8 @@ define void @fcmp_uno_vv_v4f16(<2 x half>* %x, <2 x half>* %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfne.vv v25, v25, v25
 ; CHECK-NEXT:    vmfne.vv v26, v26, v26
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -532,7 +514,6 @@ define void @fcmp_oeq_vf_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -549,7 +530,6 @@ define void @fcmp_oeq_vf_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -666,7 +646,6 @@ define void @fcmp_olt_vf_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -683,7 +662,6 @@ define void @fcmp_olt_vf_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z)
 ; CHECK-NEXT:    vsetivli zero, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -700,7 +678,6 @@ define void @fcmp_oge_vf_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -717,7 +694,6 @@ define void @fcmp_oge_vf_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -785,7 +761,6 @@ define void @fcmp_ule_vf_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -804,7 +779,6 @@ define void @fcmp_ule_vf_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z)
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
@@ -821,7 +795,6 @@ define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -839,7 +812,6 @@ define void @fcmp_uge_vf_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z
 ; CHECK-NEXT:    vsetivli zero, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -856,7 +828,6 @@ define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -874,7 +845,6 @@ define void @fcmp_ult_vf_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z)
 ; CHECK-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -892,7 +862,6 @@ define void @fcmp_ugt_vf_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -911,7 +880,6 @@ define void @fcmp_ugt_vf_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z)
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
@@ -930,7 +898,6 @@ define void @fcmp_ueq_vf_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -949,7 +916,6 @@ define void @fcmp_ueq_vf_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z
 ; CHECK-NEXT:    vsetvli zero, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
@@ -967,7 +933,6 @@ define void @fcmp_one_vf_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -985,7 +950,6 @@ define void @fcmp_one_vf_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %
 ; CHECK-NEXT:    vsetivli zero, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfne.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -1004,8 +968,8 @@ define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v26, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v25, v25, v25
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v25, v26
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1032,8 +996,8 @@ define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v26, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v25, v25, v25
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v25, v26
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1058,7 +1022,6 @@ define void @fcmp_oeq_fv_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -1075,7 +1038,6 @@ define void @fcmp_oeq_fv_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -1192,7 +1154,6 @@ define void @fcmp_olt_fv_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -1209,7 +1170,6 @@ define void @fcmp_olt_fv_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z)
 ; CHECK-NEXT:    vsetivli zero, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -1226,7 +1186,6 @@ define void @fcmp_oge_fv_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -1243,7 +1202,6 @@ define void @fcmp_oge_fv_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -1311,7 +1269,6 @@ define void @fcmp_ule_fv_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1330,7 +1287,6 @@ define void @fcmp_ule_fv_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z)
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
@@ -1347,7 +1303,6 @@ define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1365,7 +1320,6 @@ define void @fcmp_uge_fv_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z
 ; CHECK-NEXT:    vsetivli zero, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -1382,7 +1336,6 @@ define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1400,7 +1353,6 @@ define void @fcmp_ult_fv_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z)
 ; CHECK-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -1418,7 +1370,6 @@ define void @fcmp_ugt_fv_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1437,7 +1388,6 @@ define void @fcmp_ugt_fv_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z)
 ; CHECK-NEXT:    vsetvli zero, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
@@ -1456,7 +1406,6 @@ define void @fcmp_ueq_fv_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1475,7 +1424,6 @@ define void @fcmp_ueq_fv_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z
 ; CHECK-NEXT:    vsetvli zero, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmfeq.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
@@ -1493,7 +1441,6 @@ define void @fcmp_one_fv_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1511,7 +1458,6 @@ define void @fcmp_one_fv_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %
 ; CHECK-NEXT:    vsetivli zero, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfne.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -1530,8 +1476,8 @@ define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v26, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v25, v25, v25
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1558,8 +1504,8 @@ define void @fcmp_uno_fv_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v26, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v25, v25, v25
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
index 7d703a8aa308..a8911850c4cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -8,9 +8,7 @@ define void @vselect_vv_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -27,9 +25,7 @@ define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v26, v26, a0, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -47,9 +43,7 @@ define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v26, v26, -1, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -68,9 +62,7 @@ define void @vselect_vv_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %cc, <
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -87,9 +79,7 @@ define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x flo
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v26, v26, fa0, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -107,9 +97,7 @@ define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>*
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v26, v26, 0, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -128,9 +116,7 @@ define void @vselect_vv_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %cc, <
 ; CHECK-NEXT:    vsetivli zero, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse16.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -147,9 +133,7 @@ define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v26, v26, a0, v0
 ; CHECK-NEXT:    vse16.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -167,9 +151,7 @@ define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v26, v26, 4, v0
 ; CHECK-NEXT:    vse16.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -189,9 +171,7 @@ define void @vselect_vv_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %cc,
 ; CHECK-NEXT:    vsetvli zero, a4, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v28, v8, v28, v0
 ; CHECK-NEXT:    vse16.v v28, (a3)
 ; CHECK-NEXT:    ret
@@ -209,9 +189,7 @@ define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x h
 ; CHECK-NEXT:    addi a3, zero, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v28, v28, fa0, v0
 ; CHECK-NEXT:    vse16.v v28, (a2)
 ; CHECK-NEXT:    ret
@@ -230,9 +208,7 @@ define void @vselect_vfpzero_v32f16(<32 x half>* %b, <32 x i1>* %cc, <32 x half>
 ; CHECK-NEXT:    addi a3, zero, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v28, v28, 0, v0
 ; CHECK-NEXT:    vse16.v v28, (a2)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
index 5acdb879f34a..aa46a403d6b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
@@ -10,9 +10,7 @@ define <vscale x 2 x i32> @saddo_nvx2i32(<vscale x 2 x i32> %x, <vscale x 2 x i3
 ; CHECK-NEXT:    vadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmslt.vv v26, v25, v8
 ; CHECK-NEXT:    vmsle.vi v27, v9, -1
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v27, v26
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v25, 0, v0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.sadd.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
index b5c2be5419fb..9afd87e54c67 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
@@ -291,7 +291,6 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v10
 ; CHECK-NEXT:    vmflt.vv v26, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp one <vscale x 8 x half> %va, %vb
@@ -304,7 +303,6 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -319,7 +317,6 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -356,7 +353,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v10
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x half> %va, %vb
@@ -370,7 +366,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -386,7 +381,6 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -401,7 +395,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v10
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x half> %va, %vb
@@ -415,7 +408,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -430,7 +422,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v10
 ; CHECK-NEXT:    vmflt.vv v26, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ueq <vscale x 8 x half> %va, %vb
@@ -443,7 +434,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -458,7 +448,6 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -494,7 +483,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x half> %va, %vb
@@ -506,7 +494,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -520,7 +507,6 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -556,7 +542,6 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x half> %va, %vb
@@ -568,7 +553,6 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -582,7 +566,6 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -618,7 +601,6 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x half> %va, %vb
@@ -630,7 +612,6 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -644,7 +625,6 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -680,7 +660,6 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x half> %va, %vb
@@ -692,7 +671,6 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -706,7 +684,6 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -799,7 +776,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v10
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x half> %va, %vb
@@ -813,7 +789,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -829,7 +804,6 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -844,7 +818,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v10
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x half> %va, %vb
@@ -858,7 +831,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -1153,7 +1125,6 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v12
 ; CHECK-NEXT:    vmflt.vv v26, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp one <vscale x 8 x float> %va, %vb
@@ -1166,7 +1137,6 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1181,7 +1151,6 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1218,7 +1187,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v12
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x float> %va, %vb
@@ -1232,7 +1200,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1248,7 +1215,6 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1263,7 +1229,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v12
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x float> %va, %vb
@@ -1277,7 +1242,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1292,7 +1256,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v12
 ; CHECK-NEXT:    vmflt.vv v26, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ueq <vscale x 8 x float> %va, %vb
@@ -1305,7 +1268,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1320,7 +1282,6 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1356,7 +1317,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x float> %va, %vb
@@ -1368,7 +1328,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1382,7 +1341,6 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1418,7 +1376,6 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x float> %va, %vb
@@ -1430,7 +1387,6 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1444,7 +1400,6 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1480,7 +1435,6 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x float> %va, %vb
@@ -1492,7 +1446,6 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1506,7 +1459,6 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1542,7 +1494,6 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x float> %va, %vb
@@ -1554,7 +1505,6 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1568,7 +1518,6 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1661,7 +1610,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v12
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x float> %va, %vb
@@ -1675,7 +1623,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1691,7 +1638,6 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1706,7 +1652,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v12
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x float> %va, %vb
@@ -1720,7 +1665,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -2015,7 +1959,6 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
 ; CHECK-NEXT:    vmflt.vv v26, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp one <vscale x 8 x double> %va, %vb
@@ -2028,7 +1971,6 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2043,7 +1985,6 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2080,7 +2021,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v16, v16
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x double> %va, %vb
@@ -2094,7 +2034,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2110,7 +2049,6 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2125,7 +2063,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v16, v16
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x double> %va, %vb
@@ -2139,7 +2076,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2154,7 +2090,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
 ; CHECK-NEXT:    vmflt.vv v26, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ueq <vscale x 8 x double> %va, %vb
@@ -2167,7 +2102,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2182,7 +2116,6 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2218,7 +2151,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x double> %va, %vb
@@ -2230,7 +2162,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2244,7 +2175,6 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2280,7 +2210,6 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x double> %va, %vb
@@ -2292,7 +2221,6 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2306,7 +2234,6 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2342,7 +2269,6 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x double> %va, %vb
@@ -2354,7 +2280,6 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2368,7 +2293,6 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2404,7 +2328,6 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x double> %va, %vb
@@ -2416,7 +2339,6 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2430,7 +2352,6 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2523,7 +2444,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v16, v16
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x double> %va, %vb
@@ -2537,7 +2457,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2553,7 +2472,6 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2568,7 +2486,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v16, v16
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x double> %va, %vb
@@ -2582,7 +2499,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0

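Every deletion in the setcc hunks above has the same shape: the compares
(vmflt/vmfle/vmfeq/vmfne) run under the vsetvli established for the FP
element type (e16,m2 / e32,m4 / e64,m8), and the mask-register logical op
that combines them (vmor.mm, vmand.mm, vmnand.mm, vmnor.mm) no longer gets
its own vsetvli, because a mask operation only requires the same VLMAX,
which the earlier vsetvli already provides. A minimal reconstruction of one
affected test, pieced together from the hunks above (the trailing ret is
assumed; the diff does not show it):

; Reconstructed from the fcmp_one_vv_nxv8f64 hunk above. "one" lowers to
; two vmflt.vv compares plus a vmor.mm on the mask results; after this
; patch no vsetvli separates them, since vmor.mm only needs the VLMAX
; already established by the e64,m8 vsetvli.
define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
  %vc = fcmp one <vscale x 8 x double> %va, %vb
  ret <vscale x 8 x i1> %vc
}
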
diff  --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
index 9ceef21dc502..1ecca50f42a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll
@@ -291,7 +291,6 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v10
 ; CHECK-NEXT:    vmflt.vv v26, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp one <vscale x 8 x half> %va, %vb
@@ -304,7 +303,6 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -319,7 +317,6 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -356,7 +353,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v10
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x half> %va, %vb
@@ -370,7 +366,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -386,7 +381,6 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -401,7 +395,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v10
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x half> %va, %vb
@@ -415,7 +408,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -430,7 +422,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v10
 ; CHECK-NEXT:    vmflt.vv v26, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ueq <vscale x 8 x half> %va, %vb
@@ -443,7 +434,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -458,7 +448,6 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -494,7 +483,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x half> %va, %vb
@@ -506,7 +494,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -520,7 +507,6 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -556,7 +542,6 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x half> %va, %vb
@@ -568,7 +553,6 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -582,7 +566,6 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -618,7 +601,6 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x half> %va, %vb
@@ -630,7 +612,6 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -644,7 +625,6 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -680,7 +660,6 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x half> %va, %vb
@@ -692,7 +671,6 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -706,7 +684,6 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -799,7 +776,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v10
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x half> %va, %vb
@@ -813,7 +789,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -829,7 +804,6 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -844,7 +818,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v10
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x half> %va, %vb
@@ -858,7 +831,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> undef, half %b, i32 0
@@ -1153,7 +1125,6 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v12
 ; CHECK-NEXT:    vmflt.vv v26, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp one <vscale x 8 x float> %va, %vb
@@ -1166,7 +1137,6 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1181,7 +1151,6 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1218,7 +1187,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v12
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x float> %va, %vb
@@ -1232,7 +1200,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1248,7 +1215,6 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1263,7 +1229,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v12
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x float> %va, %vb
@@ -1277,7 +1242,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1292,7 +1256,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v12
 ; CHECK-NEXT:    vmflt.vv v26, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ueq <vscale x 8 x float> %va, %vb
@@ -1305,7 +1268,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1320,7 +1282,6 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1356,7 +1317,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x float> %va, %vb
@@ -1368,7 +1328,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1382,7 +1341,6 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1418,7 +1376,6 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x float> %va, %vb
@@ -1430,7 +1387,6 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1444,7 +1400,6 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1480,7 +1435,6 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x float> %va, %vb
@@ -1492,7 +1446,6 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1506,7 +1459,6 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1542,7 +1494,6 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x float> %va, %vb
@@ -1554,7 +1505,6 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1568,7 +1518,6 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1661,7 +1610,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v12
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x float> %va, %vb
@@ -1675,7 +1623,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1691,7 +1638,6 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -1706,7 +1652,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
 ; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v12
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x float> %va, %vb
@@ -1720,7 +1665,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    vfmv.v.f v28, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v28, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
@@ -2015,7 +1959,6 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
 ; CHECK-NEXT:    vmflt.vv v26, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp one <vscale x 8 x double> %va, %vb
@@ -2028,7 +1971,6 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2043,7 +1985,6 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2080,7 +2021,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v16, v16
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x double> %va, %vb
@@ -2094,7 +2034,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2110,7 +2049,6 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2125,7 +2063,6 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v16, v16
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ord <vscale x 8 x double> %va, %vb
@@ -2139,7 +2076,6 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfeq.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2154,7 +2090,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
 ; CHECK-NEXT:    vmflt.vv v26, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ueq <vscale x 8 x double> %va, %vb
@@ -2167,7 +2102,6 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2182,7 +2116,6 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2218,7 +2151,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x double> %va, %vb
@@ -2230,7 +2162,6 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2244,7 +2175,6 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2280,7 +2210,6 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x double> %va, %vb
@@ -2292,7 +2221,6 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2306,7 +2234,6 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2342,7 +2269,6 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x double> %va, %vb
@@ -2354,7 +2280,6 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfge.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2368,7 +2293,6 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfle.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2404,7 +2328,6 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x double> %va, %vb
@@ -2416,7 +2339,6 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2430,7 +2352,6 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2523,7 +2444,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v16, v16
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x double> %va, %vb
@@ -2537,7 +2457,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2553,7 +2472,6 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0
@@ -2568,7 +2486,6 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmfne.vv v25, v16, v16
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %vc = fcmp uno <vscale x 8 x double> %va, %vb
@@ -2582,7 +2499,6 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vmfne.vf v25, v16, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v8, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> undef, double %b, i32 0

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
index ae1a643cd489..dbe330c4c988 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -1266,7 +1266,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1315,7 +1315,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1364,7 +1364,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1413,7 +1413,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1462,7 +1462,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1560,7 +1560,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1609,7 +1609,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1658,7 +1658,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
index 38c3ee5fa0d7..4fdd4c5e0ea8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -1266,7 +1266,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1315,7 +1315,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1364,7 +1364,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1413,7 +1413,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1462,7 +1462,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1560,7 +1560,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1609,7 +1609,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1658,7 +1658,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1707,7 +1707,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1756,7 +1756,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1805,7 +1805,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
index f1cf775cced9..d011c496b824 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -1266,7 +1266,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1315,7 +1315,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1364,7 +1364,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1413,7 +1413,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1462,7 +1462,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1560,7 +1560,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1609,7 +1609,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1658,7 +1658,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
index 78669f56abf0..efb575367714 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -1266,7 +1266,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1315,7 +1315,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1364,7 +1364,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1413,7 +1413,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1462,7 +1462,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
 ; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1511,7 +1511,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1560,7 +1560,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1609,7 +1609,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1658,7 +1658,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1707,7 +1707,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -1756,7 +1756,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1805,7 +1805,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
 ; CHECK-NEXT:    ret
 entry:

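The common thread in all of the hunks above is that the second vsetvli before the
vmxor.mm no longer has to match the SEW/LMUL of the preceding compare; it only has
to preserve the same VLMAX. Since VLMAX = VLEN * LMUL / SEW, scaling SEW and LMUL
down together leaves VLMAX unchanged, which is why e16,m4 becomes e8,m2 and both
e32,mf2 and e64,m1 become e8,mf8. The sketch below is illustrative only (it is not
the pass's actual code; the struct, names, and log2 LMUL encoding are assumptions
made for the example) and just demonstrates the ratio-preserving mapping:

// Illustrative sketch, not the pass's real data structures. LMUL is
// encoded as log2, so mf8..m8 map to -3..3.
#include <cstdio>

struct VType {
  unsigned SEW;  // 8, 16, 32 or 64
  int LMULLog2;  // -3 = mf8 ... 3 = m8
};

// Smallest-SEW vtype with the same VLMAX: VLMAX = VLEN * LMUL / SEW,
// so dividing both SEW and LMUL by SEW/8 leaves VLMAX unchanged.
static VType equivalentE8(VType VT) {
  int SEWLog2 = __builtin_ctz(VT.SEW); // 3, 4, 5 or 6
  return {8, VT.LMULLog2 - (SEWLog2 - 3)};
}

int main() {
  // e16,m4 ; e32,mf2 ; e64,m1 -- the cases seen in the hunks above.
  const VType Tests[] = {{16, 2}, {32, -1}, {64, 0}};
  for (VType VT : Tests) {
    VType E8 = equivalentE8(VT);
    std::printf("e%u lmul=2^%d -> e%u lmul=2^%d\n", VT.SEW, VT.LMULLog2,
                E8.SEW, E8.LMULLog2);
  }
  return 0;
}

Running the sketch reproduces the substitutions in the diffs: e16,m4 maps to e8,m2,
while e32,mf2 and e64,m1 both map to e8,mf8. The same arithmetic explains the e64
hunks switching m1, m2 and m4 to mf8, mf4 and mf2 respectively.
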
More information about the llvm-commits mailing list