[llvm] 4a3e461 - [RISCV] Extend demanded field transform in InsertVSETVLI to VTYPE subfields

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 16 08:01:46 PDT 2022


Author: Philip Reames
Date: 2022-06-16T08:01:27-07:00
New Revision: 4a3e46115a7feae955e26b537efd3de07dd88060

URL: https://github.com/llvm/llvm-project/commit/4a3e46115a7feae955e26b537efd3de07dd88060
DIFF: https://github.com/llvm/llvm-project/commit/4a3e46115a7feae955e26b537efd3de07dd88060.diff

LOG: [RISCV] Extend demanded field transform in InsertVSETVLI to VTYPE subfields

The motivating case, and the only one actually enabled by this patch, is a load or store followed by another op with the same SEW/LMUL ratio.

As an example, consider:

define void @test1(ptr %in, ptr %out) {
entry:
  %0 = load <8 x i16>, ptr %in, align 2
  %1 = sext <8 x i16> %0 to <8 x i32>
  store <8 x i32> %1, ptr %out, align 4
  ret void
}

Without this patch, we get:

	vsetivli	zero, 8, e16, mf4, ta, mu
	vle16.v	v8, (a0)
	vsetvli	zero, zero, e32, mf2, ta, mu
	vsext.vf2	v9, v8
	vse32.v	v9, (a1)
	ret

Whereas with the patch we get:

	vsetivli	zero, 8, e32, mf2, ta, mu
	vle16.v	v8, (a0)
	vsext.vf2	v9, v8
	vse32.v	v9, (a1)
	ret

We have rewritten the first vsetvli to use the new VTYPE, which lets us remove the second one entirely.
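
To see why this is legal: vector loads and stores encode their element width (EEW) directly in the opcode - vle16 always loads 16-bit elements - and derive their effective register group size as EMUL = (EEW/SEW) * LMUL. As long as the SEW/LMUL ratio is preserved, the load computes the same EMUL and behaves identically. Below is a minimal standalone sketch of that ratio check; the fixed-point encoding is an assumption for illustration, not the pass's actual getSEWLMULRatio implementation:

#include <cassert>

// SEW/LMUL ratio with LMUL held in eighths so that the fractional
// LMULs (mf2, mf4, mf8) stay integral. Hypothetical helper, for
// illustration only.
static unsigned sewLMulRatio(unsigned SEW, unsigned LMulNum,
                             unsigned LMulDen) {
  unsigned LMulEighths = (LMulNum * 8) / LMulDen; // mf4 -> 2, mf2 -> 4
  return (SEW * 8) / LMulEighths;
}

int main() {
  // e16/mf4 and e32/mf2 share the ratio 64, so rewriting the vsetivli
  // from (e16, mf4) to (e32, mf2) leaves the vle16's EMUL at mf4:
  // EMUL = (16/32) * (1/2) = 1/4.
  assert(sewLMulRatio(16, 1, 4) == sewLMulRatio(32, 1, 2));
  return 0;
}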

As is strongly hinted by the code structure and TODOs, I am planning on commoning this with all (or most) of the cases from isCompatible used in the forward data flow. This will be done in a series of follow-up changes - some NFC reworks, and some reviewed optimization extensions.

Differential Revision: https://reviews.llvm.org/D127780

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
    llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 9ae84f4bf7aed..eaa8e74ebd3b4 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1404,9 +1404,68 @@ static DemandedFields getDemanded(const MachineInstr &MI) {
     Res.MaskPolicy = true;
   }
 
+  // Loads and stores with implicit EEW do not demand SEW or LMUL directly.
+  // They instead demand the ratio of the two, which is used in computing
+  // EMUL; this gives us the flexibility to change SEW and LMUL
+  // provided we don't change the ratio.
+  if (getEEWForLoadStore(MI)) {
+    Res.SEW = false;
+    Res.LMUL = false;
+  }
+
   return Res;
 }
 
+// Return true if we can mutate PrevMI's VTYPE to match MI's
+// without changing any of the fields which have been used.
+// TODO: Restructure code to allow code reuse between this and isCompatible
+// above.
+static bool canMutatePriorConfig(const MachineInstr &PrevMI,
+                                 const MachineInstr &MI,
+                                 const DemandedFields &Used) {
+  // TODO: Extend this to handle cases where VL does change, but VL
+  // has not been used.  (e.g. over a vmv.x.s)
+  if (!isVLPreservingConfig(MI))
+    // Note: `vsetvli x0, x0, vtype' is the canonical instruction
+    // for this case.  If you find yourself wanting to add other forms
+    // to this "unused VTYPE" case, we're probably missing a
+    // canonicalization earlier.
+    return false;
+
+  if (!PrevMI.getOperand(2).isImm() || !MI.getOperand(2).isImm())
+    return false;
+
+  auto PriorVType = PrevMI.getOperand(2).getImm();
+  auto VType = MI.getOperand(2).getImm();
+
+  if (Used.SEW &&
+      RISCVVType::getSEW(VType) != RISCVVType::getSEW(PriorVType))
+    return false;
+
+  if (Used.LMUL &&
+      RISCVVType::getVLMUL(VType) != RISCVVType::getVLMUL(PriorVType))
+    return false;
+
+  if (Used.SEWLMULRatio) {
+    auto PriorRatio =
+      VSETVLIInfo::getSEWLMULRatio(RISCVVType::getSEW(PriorVType),
+                                   RISCVVType::getVLMUL(PriorVType));
+    auto Ratio =
+      VSETVLIInfo::getSEWLMULRatio(RISCVVType::getSEW(VType),
+                                   RISCVVType::getVLMUL(VType));
+    if (PriorRatio != Ratio)
+      return false;
+  }
+
+  if (Used.TailPolicy &&
+      RISCVVType::isTailAgnostic(VType) != RISCVVType::isTailAgnostic(PriorVType))
+    return false;
+  if (Used.MaskPolicy &&
+      RISCVVType::isMaskAgnostic(VType) != RISCVVType::isMaskAgnostic(PriorVType))
+    return false;
+  return true;
+}
+
 void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
   MachineInstr *PrevMI = nullptr;
   DemandedFields Used;
@@ -1423,14 +1482,7 @@ void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
       if (!Used.VL && !Used.usedVTYPE()) {
         ToDelete.push_back(PrevMI);
         // fallthrough
-      } else if (!Used.usedVTYPE() && isVLPreservingConfig(MI)) {
-        // Note: `vsetvli x0, x0, vtype' is the canonical instruction
-        // for this case.  If you find yourself wanting to add other forms
-        // to this "unused VTYPE" case, we're probably missing a
-        // canonicalization earlier.
-        // Note: We don't need to explicitly check vtype compatibility
-        // here because this form is only legal (per ISA) when not
-        // changing VL.
+      } else if (canMutatePriorConfig(*PrevMI, MI, Used)) {
         PrevMI->getOperand(2).setImm(MI.getOperand(2).getImm());
         ToDelete.push_back(&MI);
         // Leave PrevMI unchanged

diff  --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
index 1c32599891cb0..3ef7f9a5479c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -18,9 +18,8 @@ define <vscale x 1 x i8> @sextload_nxv1i1_nxv1i8(<vscale x 1 x i1>* %x) {
 define <vscale x 1 x i16> @sextload_nxv1i8_nxv1i16(<vscale x 1 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
@@ -31,9 +30,8 @@ define <vscale x 1 x i16> @sextload_nxv1i8_nxv1i16(<vscale x 1 x i8>* %x) {
 define <vscale x 1 x i16> @zextload_nxv1i8_nxv1i16(<vscale x 1 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
@@ -44,9 +42,8 @@ define <vscale x 1 x i16> @zextload_nxv1i8_nxv1i16(<vscale x 1 x i8>* %x) {
 define <vscale x 1 x i32> @sextload_nxv1i8_nxv1i32(<vscale x 1 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
@@ -57,9 +54,8 @@ define <vscale x 1 x i32> @sextload_nxv1i8_nxv1i32(<vscale x 1 x i8>* %x) {
 define <vscale x 1 x i32> @zextload_nxv1i8_nxv1i32(<vscale x 1 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
@@ -70,9 +66,8 @@ define <vscale x 1 x i32> @zextload_nxv1i8_nxv1i32(<vscale x 1 x i8>* %x) {
 define <vscale x 1 x i64> @sextload_nxv1i8_nxv1i64(<vscale x 1 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf8 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
@@ -83,9 +78,8 @@ define <vscale x 1 x i64> @sextload_nxv1i8_nxv1i64(<vscale x 1 x i8>* %x) {
 define <vscale x 1 x i64> @zextload_nxv1i8_nxv1i64(<vscale x 1 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf8 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
@@ -96,9 +90,8 @@ define <vscale x 1 x i64> @zextload_nxv1i8_nxv1i64(<vscale x 1 x i8>* %x) {
 define <vscale x 2 x i16> @sextload_nxv2i8_nxv2i16(<vscale x 2 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
@@ -109,9 +102,8 @@ define <vscale x 2 x i16> @sextload_nxv2i8_nxv2i16(<vscale x 2 x i8>* %x) {
 define <vscale x 2 x i16> @zextload_nxv2i8_nxv2i16(<vscale x 2 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
@@ -122,9 +114,8 @@ define <vscale x 2 x i16> @zextload_nxv2i8_nxv2i16(<vscale x 2 x i8>* %x) {
 define <vscale x 2 x i32> @sextload_nxv2i8_nxv2i32(<vscale x 2 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
@@ -135,9 +126,8 @@ define <vscale x 2 x i32> @sextload_nxv2i8_nxv2i32(<vscale x 2 x i8>* %x) {
 define <vscale x 2 x i32> @zextload_nxv2i8_nxv2i32(<vscale x 2 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
@@ -148,9 +138,8 @@ define <vscale x 2 x i32> @zextload_nxv2i8_nxv2i32(<vscale x 2 x i8>* %x) {
 define <vscale x 2 x i64> @sextload_nxv2i8_nxv2i64(<vscale x 2 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v10, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vsext.vf8 v8, v10
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
@@ -161,9 +150,8 @@ define <vscale x 2 x i64> @sextload_nxv2i8_nxv2i64(<vscale x 2 x i8>* %x) {
 define <vscale x 2 x i64> @zextload_nxv2i8_nxv2i64(<vscale x 2 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v10, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vzext.vf8 v8, v10
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
@@ -174,9 +162,8 @@ define <vscale x 2 x i64> @zextload_nxv2i8_nxv2i64(<vscale x 2 x i8>* %x) {
 define <vscale x 4 x i16> @sextload_nxv4i8_nxv4i16(<vscale x 4 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
@@ -187,9 +174,8 @@ define <vscale x 4 x i16> @sextload_nxv4i8_nxv4i16(<vscale x 4 x i8>* %x) {
 define <vscale x 4 x i16> @zextload_nxv4i8_nxv4i16(<vscale x 4 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
@@ -200,9 +186,8 @@ define <vscale x 4 x i16> @zextload_nxv4i8_nxv4i16(<vscale x 4 x i8>* %x) {
 define <vscale x 4 x i32> @sextload_nxv4i8_nxv4i32(<vscale x 4 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v10, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10
 ; CHECK-NEXT:    ret
   %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
@@ -213,9 +198,8 @@ define <vscale x 4 x i32> @sextload_nxv4i8_nxv4i32(<vscale x 4 x i8>* %x) {
 define <vscale x 4 x i32> @zextload_nxv4i8_nxv4i32(<vscale x 4 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v10, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10
 ; CHECK-NEXT:    ret
   %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
@@ -226,9 +210,8 @@ define <vscale x 4 x i32> @zextload_nxv4i8_nxv4i32(<vscale x 4 x i8>* %x) {
 define <vscale x 4 x i64> @sextload_nxv4i8_nxv4i64(<vscale x 4 x i8>* %x) {
 ; CHECK-LABEL: sextload_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vsext.vf8 v8, v12
 ; CHECK-NEXT:    ret
   %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
@@ -239,9 +222,8 @@ define <vscale x 4 x i64> @sextload_nxv4i8_nxv4i64(<vscale x 4 x i8>* %x) {
 define <vscale x 4 x i64> @zextload_nxv4i8_nxv4i64(<vscale x 4 x i8>* %x) {
 ; CHECK-LABEL: zextload_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vzext.vf8 v8, v12
 ; CHECK-NEXT:    ret
   %y = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
@@ -421,9 +403,8 @@ define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, <vscale x 1 x i8>*
 define <vscale x 1 x i32> @sextload_nxv1i16_nxv1i32(<vscale x 1 x i16>* %x) {
 ; CHECK-LABEL: sextload_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
@@ -434,9 +415,8 @@ define <vscale x 1 x i32> @sextload_nxv1i16_nxv1i32(<vscale x 1 x i16>* %x) {
 define <vscale x 1 x i32> @zextload_nxv1i16_nxv1i32(<vscale x 1 x i16>* %x) {
 ; CHECK-LABEL: zextload_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
@@ -447,9 +427,8 @@ define <vscale x 1 x i32> @zextload_nxv1i16_nxv1i32(<vscale x 1 x i16>* %x) {
 define <vscale x 1 x i64> @sextload_nxv1i16_nxv1i64(<vscale x 1 x i16>* %x) {
 ; CHECK-LABEL: sextload_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
@@ -460,9 +439,8 @@ define <vscale x 1 x i64> @sextload_nxv1i16_nxv1i64(<vscale x 1 x i16>* %x) {
 define <vscale x 1 x i64> @zextload_nxv1i16_nxv1i64(<vscale x 1 x i16>* %x) {
 ; CHECK-LABEL: zextload_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i16>, <vscale x 1 x i16>* %x
@@ -485,9 +463,8 @@ define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, <vscale x 2 x i8>*
 define <vscale x 2 x i32> @sextload_nxv2i16_nxv2i32(<vscale x 2 x i16>* %x) {
 ; CHECK-LABEL: sextload_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
@@ -498,9 +475,8 @@ define <vscale x 2 x i32> @sextload_nxv2i16_nxv2i32(<vscale x 2 x i16>* %x) {
 define <vscale x 2 x i32> @zextload_nxv2i16_nxv2i32(<vscale x 2 x i16>* %x) {
 ; CHECK-LABEL: zextload_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
@@ -511,9 +487,8 @@ define <vscale x 2 x i32> @zextload_nxv2i16_nxv2i32(<vscale x 2 x i16>* %x) {
 define <vscale x 2 x i64> @sextload_nxv2i16_nxv2i64(<vscale x 2 x i16>* %x) {
 ; CHECK-LABEL: sextload_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v10, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
@@ -524,9 +499,8 @@ define <vscale x 2 x i64> @sextload_nxv2i16_nxv2i64(<vscale x 2 x i16>* %x) {
 define <vscale x 2 x i64> @zextload_nxv2i16_nxv2i64(<vscale x 2 x i16>* %x) {
 ; CHECK-LABEL: zextload_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v10, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10
 ; CHECK-NEXT:    ret
   %y = load <vscale x 2 x i16>, <vscale x 2 x i16>* %x
@@ -731,9 +705,8 @@ define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, <vscale x 1 x i16
 define <vscale x 1 x i64> @sextload_nxv1i32_nxv1i64(<vscale x 1 x i32>* %x) {
 ; CHECK-LABEL: sextload_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i32>, <vscale x 1 x i32>* %x
@@ -744,9 +717,8 @@ define <vscale x 1 x i64> @sextload_nxv1i32_nxv1i64(<vscale x 1 x i32>* %x) {
 define <vscale x 1 x i64> @zextload_nxv1i32_nxv1i64(<vscale x 1 x i32>* %x) {
 ; CHECK-LABEL: zextload_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <vscale x 1 x i32>, <vscale x 1 x i32>* %x

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index 4f141e44f8ac3..b832a6ab278d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -39,9 +39,8 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ;
 ; LMULMAX8-LABEL: ctlz_v16i8:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; LMULMAX8-NEXT:    vzext.vf4 v12, v8
 ; LMULMAX8-NEXT:    vfcvt.f.xu.v v12, v12
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
@@ -839,9 +838,8 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-LABEL: ctlz_v32i8:
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    li a1, 32
-; LMULMAX8-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vzext.vf4 v16, v8
 ; LMULMAX8-NEXT:    vfcvt.f.xu.v v16, v16
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m4, ta, mu

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
index f2fd6e1d0d3a9..0d66bd395a87e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -21,9 +21,8 @@ define <2 x i16> @sextload_v2i1_v2i16(<2 x i1>* %x) {
 define <2 x i16> @sextload_v2i8_v2i16(<2 x i8>* %x) {
 ; CHECK-LABEL: sextload_v2i8_v2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i8>, <2 x i8>* %x
@@ -34,9 +33,8 @@ define <2 x i16> @sextload_v2i8_v2i16(<2 x i8>* %x) {
 define <2 x i16> @zextload_v2i8_v2i16(<2 x i8>* %x) {
 ; CHECK-LABEL: zextload_v2i8_v2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i8>, <2 x i8>* %x
@@ -47,9 +45,8 @@ define <2 x i16> @zextload_v2i8_v2i16(<2 x i8>* %x) {
 define <2 x i32> @sextload_v2i8_v2i32(<2 x i8>* %x) {
 ; CHECK-LABEL: sextload_v2i8_v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i8>, <2 x i8>* %x
@@ -60,9 +57,8 @@ define <2 x i32> @sextload_v2i8_v2i32(<2 x i8>* %x) {
 define <2 x i32> @zextload_v2i8_v2i32(<2 x i8>* %x) {
 ; CHECK-LABEL: zextload_v2i8_v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i8>, <2 x i8>* %x
@@ -73,9 +69,8 @@ define <2 x i32> @zextload_v2i8_v2i32(<2 x i8>* %x) {
 define <2 x i64> @sextload_v2i8_v2i64(<2 x i8>* %x) {
 ; CHECK-LABEL: sextload_v2i8_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf8 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i8>, <2 x i8>* %x
@@ -86,9 +81,8 @@ define <2 x i64> @sextload_v2i8_v2i64(<2 x i8>* %x) {
 define <2 x i64> @zextload_v2i8_v2i64(<2 x i8>* %x) {
 ; CHECK-LABEL: zextload_v2i8_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf8 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i8>, <2 x i8>* %x
@@ -99,9 +93,8 @@ define <2 x i64> @zextload_v2i8_v2i64(<2 x i8>* %x) {
 define <4 x i16> @sextload_v4i8_v4i16(<4 x i8>* %x) {
 ; CHECK-LABEL: sextload_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <4 x i8>, <4 x i8>* %x
@@ -112,9 +105,8 @@ define <4 x i16> @sextload_v4i8_v4i16(<4 x i8>* %x) {
 define <4 x i16> @zextload_v4i8_v4i16(<4 x i8>* %x) {
 ; CHECK-LABEL: zextload_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <4 x i8>, <4 x i8>* %x
@@ -125,9 +117,8 @@ define <4 x i16> @zextload_v4i8_v4i16(<4 x i8>* %x) {
 define <4 x i32> @sextload_v4i8_v4i32(<4 x i8>* %x) {
 ; CHECK-LABEL: sextload_v4i8_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <4 x i8>, <4 x i8>* %x
@@ -138,9 +129,8 @@ define <4 x i32> @sextload_v4i8_v4i32(<4 x i8>* %x) {
 define <4 x i32> @zextload_v4i8_v4i32(<4 x i8>* %x) {
 ; CHECK-LABEL: zextload_v4i8_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <4 x i8>, <4 x i8>* %x
@@ -162,9 +152,8 @@ define <4 x i64> @sextload_v4i8_v4i64(<4 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v4i8_v4i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf8 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <4 x i8>, <4 x i8>* %x
@@ -186,9 +175,8 @@ define <4 x i64> @zextload_v4i8_v4i64(<4 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v4i8_v4i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf8 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <4 x i8>, <4 x i8>* %x
@@ -199,9 +187,8 @@ define <4 x i64> @zextload_v4i8_v4i64(<4 x i8>* %x) {
 define <8 x i16> @sextload_v8i8_v8i16(<8 x i8>* %x) {
 ; CHECK-LABEL: sextload_v8i8_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <8 x i8>, <8 x i8>* %x
@@ -212,9 +199,8 @@ define <8 x i16> @sextload_v8i8_v8i16(<8 x i8>* %x) {
 define <8 x i16> @zextload_v8i8_v8i16(<8 x i8>* %x) {
 ; CHECK-LABEL: zextload_v8i8_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <8 x i8>, <8 x i8>* %x
@@ -236,9 +222,8 @@ define <8 x i32> @sextload_v8i8_v8i32(<8 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v8i8_v8i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf4 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i8>, <8 x i8>* %x
@@ -260,9 +245,8 @@ define <8 x i32> @zextload_v8i8_v8i32(<8 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v8i8_v8i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf4 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i8>, <8 x i8>* %x
@@ -292,9 +276,8 @@ define <8 x i64> @sextload_v8i8_v8i64(<8 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v8i8_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf8 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i8>, <8 x i8>* %x
@@ -324,9 +307,8 @@ define <8 x i64> @zextload_v8i8_v8i64(<8 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v8i8_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf8 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i8>, <8 x i8>* %x
@@ -348,9 +330,8 @@ define <16 x i16> @sextload_v16i8_v16i16(<16 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v16i8_v16i16:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf2 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <16 x i8>, <16 x i8>* %x
@@ -372,9 +353,8 @@ define <16 x i16> @zextload_v16i8_v16i16(<16 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v16i8_v16i16:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf2 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <16 x i8>, <16 x i8>* %x
@@ -404,9 +384,8 @@ define <16 x i32> @sextload_v16i8_v16i32(<16 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v16i8_v16i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf4 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <16 x i8>, <16 x i8>* %x
@@ -436,9 +415,8 @@ define <16 x i32> @zextload_v16i8_v16i32(<16 x i8>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v16i8_v16i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vle8.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf4 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <16 x i8>, <16 x i8>* %x
@@ -584,9 +562,8 @@ define void @truncstore_v2i16_v2i8(<2 x i16> %x, <2 x i8>* %z) {
 define <2 x i32> @sextload_v2i16_v2i32(<2 x i16>* %x) {
 ; CHECK-LABEL: sextload_v2i16_v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i16>, <2 x i16>* %x
@@ -597,9 +574,8 @@ define <2 x i32> @sextload_v2i16_v2i32(<2 x i16>* %x) {
 define <2 x i32> @zextload_v2i16_v2i32(<2 x i16>* %x) {
 ; CHECK-LABEL: zextload_v2i16_v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i16>, <2 x i16>* %x
@@ -610,9 +586,8 @@ define <2 x i32> @zextload_v2i16_v2i32(<2 x i16>* %x) {
 define <2 x i64> @sextload_v2i16_v2i64(<2 x i16>* %x) {
 ; CHECK-LABEL: sextload_v2i16_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i16>, <2 x i16>* %x
@@ -623,9 +598,8 @@ define <2 x i64> @sextload_v2i16_v2i64(<2 x i16>* %x) {
 define <2 x i64> @zextload_v2i16_v2i64(<2 x i16>* %x) {
 ; CHECK-LABEL: zextload_v2i16_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i16>, <2 x i16>* %x
@@ -648,9 +622,8 @@ define void @truncstore_v4i16_v4i8(<4 x i16> %x, <4 x i8>* %z) {
 define <4 x i32> @sextload_v4i16_v4i32(<4 x i16>* %x) {
 ; CHECK-LABEL: sextload_v4i16_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <4 x i16>, <4 x i16>* %x
@@ -661,9 +634,8 @@ define <4 x i32> @sextload_v4i16_v4i32(<4 x i16>* %x) {
 define <4 x i32> @zextload_v4i16_v4i32(<4 x i16>* %x) {
 ; CHECK-LABEL: zextload_v4i16_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <4 x i16>, <4 x i16>* %x
@@ -685,9 +657,8 @@ define <4 x i64> @sextload_v4i16_v4i64(<4 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v4i16_v4i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf4 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <4 x i16>, <4 x i16>* %x
@@ -709,9 +680,8 @@ define <4 x i64> @zextload_v4i16_v4i64(<4 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v4i16_v4i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf4 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <4 x i16>, <4 x i16>* %x
@@ -745,9 +715,8 @@ define <8 x i32> @sextload_v8i16_v8i32(<8 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v8i16_v8i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf2 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i16>, <8 x i16>* %x
@@ -769,9 +738,8 @@ define <8 x i32> @zextload_v8i16_v8i32(<8 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v8i16_v8i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf2 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i16>, <8 x i16>* %x
@@ -801,9 +769,8 @@ define <8 x i64> @sextload_v8i16_v8i64(<8 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v8i16_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf4 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i16>, <8 x i16>* %x
@@ -833,9 +800,8 @@ define <8 x i64> @zextload_v8i16_v8i64(<8 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v8i16_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf4 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i16>, <8 x i16>* %x
@@ -886,9 +852,8 @@ define <16 x i32> @sextload_v16i16_v16i32(<16 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v16i16_v16i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf2 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <16 x i16>, <16 x i16>* %x
@@ -917,9 +882,8 @@ define <16 x i32> @zextload_v16i16_v16i32(<16 x i16>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v16i16_v16i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vle16.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf2 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <16 x i16>, <16 x i16>* %x
@@ -1056,9 +1020,8 @@ define void @truncstore_v2i32_v2i16(<2 x i32> %x, <2 x i16>* %z) {
 define <2 x i64> @sextload_v2i32_v2i64(<2 x i32>* %x) {
 ; CHECK-LABEL: sextload_v2i32_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i32>, <2 x i32>* %x
@@ -1069,9 +1032,8 @@ define <2 x i64> @sextload_v2i32_v2i64(<2 x i32>* %x) {
 define <2 x i64> @zextload_v2i32_v2i64(<2 x i32>* %x) {
 ; CHECK-LABEL: zextload_v2i32_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
   %y = load <2 x i32>, <2 x i32>* %x
@@ -1119,9 +1081,8 @@ define <4 x i64> @sextload_v4i32_v4i64(<4 x i32>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v4i32_v4i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vle32.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf2 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <4 x i32>, <4 x i32>* %x
@@ -1143,9 +1104,8 @@ define <4 x i64> @zextload_v4i32_v4i64(<4 x i32>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v4i32_v4i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vle32.v v10, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf2 v8, v10
 ; LMULMAX4-NEXT:    ret
   %y = load <4 x i32>, <4 x i32>* %x
@@ -1225,9 +1185,8 @@ define <8 x i64> @sextload_v8i32_v8i64(<8 x i32>* %x) {
 ;
 ; LMULMAX4-LABEL: sextload_v8i32_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vle32.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vsext.vf2 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i32>, <8 x i32>* %x
@@ -1256,9 +1215,8 @@ define <8 x i64> @zextload_v8i32_v8i64(<8 x i32>* %x) {
 ;
 ; LMULMAX4-LABEL: zextload_v8i32_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; LMULMAX4-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vle32.v v12, (a0)
-; LMULMAX4-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX4-NEXT:    vzext.vf2 v8, v12
 ; LMULMAX4-NEXT:    ret
   %y = load <8 x i32>, <8 x i32>* %x

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
index faf403816b02e..5c78bec991ddf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
@@ -116,9 +116,8 @@ define void @fpext_v8f16_v8f64(<8 x half>* %x, <8 x double>* %y) {
 define void @fpround_v2f32_v2f16(<2 x float>* %x, <2 x half>* %y) {
 ; CHECK-LABEL: fpround_v2f32_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v9, v8
 ; CHECK-NEXT:    vse16.v v9, (a1)
 ; CHECK-NEXT:    ret
@@ -131,9 +130,8 @@ define void @fpround_v2f32_v2f16(<2 x float>* %x, <2 x half>* %y) {
 define void @fpround_v2f64_v2f16(<2 x double>* %x, <2 x half>* %y) {
 ; CHECK-LABEL: fpround_v2f64_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9
@@ -148,9 +146,8 @@ define void @fpround_v2f64_v2f16(<2 x double>* %x, <2 x half>* %y) {
 define void @fpround_v8f32_v8f16(<8 x float>* %x, <8 x half>* %y) {
 ; LMULMAX8-LABEL: fpround_v8f32_v8f16:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.f.f.w v10, v8
 ; LMULMAX8-NEXT:    vse16.v v10, (a1)
 ; LMULMAX8-NEXT:    ret
@@ -158,10 +155,9 @@ define void @fpround_v8f32_v8f16(<8 x float>* %x, <8 x half>* %y) {
 ; LMULMAX1-LABEL: fpround_v8f32_v8f16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a2, a0, 16
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-NEXT:    vle32.v v9, (a2)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v8
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
@@ -177,9 +173,8 @@ define void @fpround_v8f32_v8f16(<8 x float>* %x, <8 x half>* %y) {
 define void @fpround_v8f64_v8f16(<8 x double>* %x, <8 x half>* %y) {
 ; LMULMAX8-LABEL: fpround_v8f64_v8f16:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.rod.f.f.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.f.f.w v8, v12
@@ -189,36 +184,35 @@ define void @fpround_v8f64_v8f16(<8 x double>* %x, <8 x half>* %y) {
 ; LMULMAX1-LABEL: fpround_v8f64_v8f16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vle64.v v8, (a2)
 ; LMULMAX1-NEXT:    addi a2, a0, 32
-; LMULMAX1-NEXT:    vle64.v v9, (a2)
-; LMULMAX1-NEXT:    vle64.v v10, (a0)
+; LMULMAX1-NEXT:    vle64.v v9, (a0)
+; LMULMAX1-NEXT:    vle64.v v10, (a2)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v11, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v12, v10
+; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v12, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v12
+; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v11, v12
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
+; LMULMAX1-NEXT:    vslideup.vi v9, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v11, v9
+; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v11, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v11
+; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v11
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
+; LMULMAX1-NEXT:    vslideup.vi v9, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v9, v8
+; LMULMAX1-NEXT:    vfncvt.rod.f.f.w v10, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
+; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
-; LMULMAX1-NEXT:    vse16.v v10, (a1)
+; LMULMAX1-NEXT:    vslideup.vi v9, v8, 6
+; LMULMAX1-NEXT:    vse16.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
   %d = fptrunc <8 x double> %a to <8 x half>

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index a81086235da7e..951ff054a11e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -85,9 +85,8 @@ define <4 x double> @vrgather_permute_shuffle_vu_v4f64(<4 x double> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI4_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI4_0)
-; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vle16.v v12, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    ret
@@ -110,9 +109,8 @@ define <4 x double> @vrgather_permute_shuffle_uv_v4f64(<4 x double> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI5_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI5_0)
-; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vle16.v v12, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v10, v8, v12
 ; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    ret
@@ -135,9 +133,8 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI6_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI6_0)
-; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vle16.v v14, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v12, v8, v14
 ; RV32-NEXT:    li a0, 8
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 6dce5690c4e70..6015f6d0ea6cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -456,10 +456,9 @@ declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
 define void @copysign_neg_ext_v2f64_v2f32(<2 x double>* %x, <2 x float>* %y) {
 ; CHECK-LABEL: copysign_neg_ext_v2f64_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle64.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 5a7edd2734ff2..5eff5f224740a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -365,9 +365,8 @@ define <2 x i1> @fp2ui_v2f16_v2i1(<2 x half> %x) {
 define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: fp2si_v2f64_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vncvt.x.x.w v8, v9
@@ -384,9 +383,8 @@ define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 define void @fp2ui_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: fp2ui_v2f64_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vncvt.x.x.w v8, v9
@@ -427,9 +425,8 @@ define <2 x i1> @fp2ui_v2f64_v2i1(<2 x double> %x) {
 define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX8-LABEL: fp2si_v8f64_v8i8:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.rtz.x.f.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vncvt.x.x.w v8, v12
@@ -441,19 +438,18 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-LABEL: fp2si_v8f64_v8i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vle64.v v8, (a2)
 ; LMULMAX1-NEXT:    addi a2, a0, 32
-; LMULMAX1-NEXT:    vle64.v v9, (a2)
-; LMULMAX1-NEXT:    vle64.v v10, (a0)
+; LMULMAX1-NEXT:    vle64.v v9, (a0)
+; LMULMAX1-NEXT:    vle64.v v10, (a2)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v11, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v12, v10
+; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v12, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v10, v12
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -461,24 +457,24 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v11, v11
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
+; LMULMAX1-NEXT:    vslideup.vi v9, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v11, v9
+; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v11, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
+; LMULMAX1-NEXT:    vslideup.vi v9, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v8, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
-; LMULMAX1-NEXT:    vse8.v v10, (a1)
+; LMULMAX1-NEXT:    vslideup.vi v9, v8, 6
+; LMULMAX1-NEXT:    vse8.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
   %d = fptosi <8 x double> %a to <8 x i8>
@@ -489,9 +485,8 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX8-LABEL: fp2ui_v8f64_v8i8:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.rtz.xu.f.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vncvt.x.x.w v8, v12
@@ -503,19 +498,18 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-LABEL: fp2ui_v8f64_v8i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vle64.v v8, (a2)
 ; LMULMAX1-NEXT:    addi a2, a0, 32
-; LMULMAX1-NEXT:    vle64.v v9, (a2)
-; LMULMAX1-NEXT:    vle64.v v10, (a0)
+; LMULMAX1-NEXT:    vle64.v v9, (a0)
+; LMULMAX1-NEXT:    vle64.v v10, (a2)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v11, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v12, v10
+; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v12, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v10, v12
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
+; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -523,24 +517,24 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v11, v11
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
+; LMULMAX1-NEXT:    vslideup.vi v9, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v11, v9
+; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v11, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v9, v11
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v9, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v10, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
+; LMULMAX1-NEXT:    vslideup.vi v9, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vncvt.x.x.w v8, v9
+; LMULMAX1-NEXT:    vncvt.x.x.w v8, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
-; LMULMAX1-NEXT:    vse8.v v10, (a1)
+; LMULMAX1-NEXT:    vslideup.vi v9, v8, 6
+; LMULMAX1-NEXT:    vse8.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
   %d = fptoui <8 x double> %a to <8 x i8>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index 265be226f7269..9f21455c875a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -201,9 +201,8 @@ define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
 define void @si2fp_v2i16_v2f64(<2 x i16>* %x, <2 x double>* %y) {
 ; CHECK-LABEL: si2fp_v2i16_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v9, v8
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9
 ; CHECK-NEXT:    vse64.v v8, (a1)
@@ -217,9 +216,8 @@ define void @si2fp_v2i16_v2f64(<2 x i16>* %x, <2 x double>* %y) {
 define void @ui2fp_v2i16_v2f64(<2 x i16>* %x, <2 x double>* %y) {
 ; CHECK-LABEL: ui2fp_v2i16_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v9, v8
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    vse64.v v8, (a1)
@@ -233,9 +231,8 @@ define void @ui2fp_v2i16_v2f64(<2 x i16>* %x, <2 x double>* %y) {
 define void @si2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) {
 ; LMULMAX8-LABEL: si2fp_v8i16_v8f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX8-NEXT:    vle16.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX8-NEXT:    vsext.vf4 v12, v8
 ; LMULMAX8-NEXT:    vfcvt.f.x.v v8, v12
 ; LMULMAX8-NEXT:    vse64.v v8, (a1)
@@ -278,9 +275,8 @@ define void @si2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) {
 define void @ui2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) {
 ; LMULMAX8-LABEL: ui2fp_v8i16_v8f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; LMULMAX8-NEXT:    vle16.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; LMULMAX8-NEXT:    vzext.vf4 v12, v8
 ; LMULMAX8-NEXT:    vfcvt.f.xu.v v8, v12
 ; LMULMAX8-NEXT:    vse64.v v8, (a1)
@@ -425,9 +421,8 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
 define void @si2fp_v2i64_v2f16(<2 x i64>* %x, <2 x half>* %y) {
 ; CHECK-LABEL: si2fp_v2i64_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.f.x.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9
@@ -442,9 +437,8 @@ define void @si2fp_v2i64_v2f16(<2 x i64>* %x, <2 x half>* %y) {
 define void @ui2fp_v2i64_v2f16(<2 x i64>* %x, <2 x half>* %y) {
 ; CHECK-LABEL: ui2fp_v2i64_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9
@@ -483,9 +477,8 @@ define <2 x half> @ui2fp_v2i1_v2f16(<2 x i1> %x) {
 define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX8-LABEL: si2fp_v8i64_v8f16:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.f.x.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.f.f.w v8, v12
@@ -495,36 +488,35 @@ define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX1-LABEL: si2fp_v8i64_v8f16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vle64.v v8, (a2)
 ; LMULMAX1-NEXT:    addi a2, a0, 32
-; LMULMAX1-NEXT:    vle64.v v9, (a2)
-; LMULMAX1-NEXT:    vle64.v v10, (a0)
+; LMULMAX1-NEXT:    vle64.v v9, (a0)
+; LMULMAX1-NEXT:    vle64.v v10, (a2)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v11, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.x.w v12, v10
+; LMULMAX1-NEXT:    vfncvt.f.x.w v12, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v12
+; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.x.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v11, v12
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
+; LMULMAX1-NEXT:    vslideup.vi v9, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.x.w v11, v9
+; LMULMAX1-NEXT:    vfncvt.f.x.w v11, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v11
+; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v11
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
+; LMULMAX1-NEXT:    vslideup.vi v9, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.x.w v9, v8
+; LMULMAX1-NEXT:    vfncvt.f.x.w v10, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
+; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
-; LMULMAX1-NEXT:    vse16.v v10, (a1)
+; LMULMAX1-NEXT:    vslideup.vi v9, v8, 6
+; LMULMAX1-NEXT:    vse16.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %d = sitofp <8 x i64> %a to <8 x half>
@@ -535,9 +527,8 @@ define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX8-LABEL: ui2fp_v8i64_v8f16:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.f.xu.w v12, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vfncvt.f.f.w v8, v12
@@ -547,36 +538,35 @@ define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX1-LABEL: ui2fp_v8i64_v8f16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vle64.v v8, (a2)
 ; LMULMAX1-NEXT:    addi a2, a0, 32
-; LMULMAX1-NEXT:    vle64.v v9, (a2)
-; LMULMAX1-NEXT:    vle64.v v10, (a0)
+; LMULMAX1-NEXT:    vle64.v v9, (a0)
+; LMULMAX1-NEXT:    vle64.v v10, (a2)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v11, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.xu.w v12, v10
+; LMULMAX1-NEXT:    vfncvt.f.xu.w v12, v9
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v12
+; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v12
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.xu.w v12, v11
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v11, v12
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v11, 2
+; LMULMAX1-NEXT:    vslideup.vi v9, v11, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.xu.w v11, v9
+; LMULMAX1-NEXT:    vfncvt.f.xu.w v11, v10
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v9, v11
+; LMULMAX1-NEXT:    vfncvt.f.f.w v10, v11
 ; LMULMAX1-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v9, 4
+; LMULMAX1-NEXT:    vslideup.vi v9, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.xu.w v9, v8
+; LMULMAX1-NEXT:    vfncvt.f.xu.w v10, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v9
+; LMULMAX1-NEXT:    vfncvt.f.f.w v8, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
-; LMULMAX1-NEXT:    vslideup.vi v10, v8, 6
-; LMULMAX1-NEXT:    vse16.v v10, (a1)
+; LMULMAX1-NEXT:    vslideup.vi v9, v8, 6
+; LMULMAX1-NEXT:    vse16.v v9, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %d = uitofp <8 x i64> %a to <8 x half>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index ca6f78dce853d..7ea275b31aab0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -9,9 +9,8 @@
 define void @sext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
 ; CHECK-LABEL: sext_v4i8_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf4 v9, v8
 ; CHECK-NEXT:    vse32.v v9, (a1)
 ; CHECK-NEXT:    ret
@@ -24,9 +23,8 @@ define void @sext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
 define void @zext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
 ; CHECK-LABEL: zext_v4i8_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v9, v8
 ; CHECK-NEXT:    vse32.v v9, (a1)
 ; CHECK-NEXT:    ret
@@ -39,18 +37,16 @@ define void @zext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
 define void @sext_v8i8_v8i32(<8 x i8>* %x, <8 x i32>* %z) {
 ; LMULMAX8-LABEL: sext_v8i8_v8i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX8-NEXT:    vsext.vf4 v10, v8
 ; LMULMAX8-NEXT:    vse32.v v10, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX2-LABEL: sext_v8i8_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
-; LMULMAX2-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vsext.vf4 v10, v8
 ; LMULMAX2-NEXT:    vse32.v v10, (a1)
 ; LMULMAX2-NEXT:    ret
@@ -78,9 +74,8 @@ define void @sext_v32i8_v32i32(<32 x i8>* %x, <32 x i32>* %z) {
 ; LMULMAX8-LABEL: sext_v32i8_v32i32:
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    li a2, 32
-; LMULMAX8-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
+; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vsext.vf4 v16, v8
 ; LMULMAX8-NEXT:    vse32.v v16, (a1)
 ; LMULMAX8-NEXT:    ret
@@ -166,9 +161,8 @@ define void @sext_v32i8_v32i32(<32 x i8>* %x, <32 x i32>* %z) {
 define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) {
 ; CHECK-LABEL: trunc_v4i8_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vncvt.x.x.w v8, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vncvt.x.x.w v8, v8
@@ -183,9 +177,8 @@ define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) {
 define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX8-LABEL: trunc_v8i8_v8i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; LMULMAX8-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
-; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX8-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX8-NEXT:    vncvt.x.x.w v8, v10
@@ -194,9 +187,8 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ;
 ; LMULMAX2-LABEL: trunc_v8i8_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; LMULMAX2-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX2-NEXT:    vncvt.x.x.w v10, v8
 ; LMULMAX2-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vncvt.x.x.w v8, v10
@@ -205,11 +197,10 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ;
 ; LMULMAX1-LABEL: trunc_v8i8_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v9, (a0)
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vncvt.x.x.w v8, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index c09a21bdcb491..a67319cd5b407 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -141,9 +141,8 @@ define <8 x i64> @vrgather_permute_shuffle_vu_v8i64(<8 x i64> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI9_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI9_0)
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    ret
@@ -166,9 +165,8 @@ define <8 x i64> @vrgather_permute_shuffle_uv_v8i64(<8 x i64> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI10_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI10_0)
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    ret
@@ -197,9 +195,8 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
 ; RV32-NEXT:    vslideup.vi v20, v16, 7
 ; RV32-NEXT:    lui a0, %hi(.LCPI11_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI11_0)
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32-NEXT:    vle16.v v21, (a0)
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT:    vle16.v v21, (a0)
 ; RV32-NEXT:    vrgatherei16.vv v16, v8, v21
 ; RV32-NEXT:    li a0, 164
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
@@ -238,9 +235,8 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI12_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI12_0)
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vmv.v.i v20, -1
 ; RV32-NEXT:    vrgatherei16.vv v12, v20, v16
 ; RV32-NEXT:    li a0, 113
@@ -248,9 +244,8 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    lui a0, %hi(.LCPI12_1)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI12_1)
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v12, v8, v16, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    ret
@@ -277,21 +272,19 @@ define <8 x i64> @vrgather_shuffle_vx_v8i64(<8 x i64> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    lui a0, %hi(.LCPI13_0)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI13_0)
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vmv4r.v v12, v8
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT:    vrgatherei16.vv v8, v12, v16
+; RV32-NEXT:    vrgatherei16.vv v12, v8, v16
 ; RV32-NEXT:    li a0, 140
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    lui a0, %hi(.LCPI13_1)
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI13_1)
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT:    vle16.v v12, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    vmv.v.i v16, 5
-; RV32-NEXT:    vrgatherei16.vv v8, v16, v12, v0.t
+; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
+; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vrgather_shuffle_vx_v8i64:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index a25117fa7f258..a883f16877c4b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -732,7 +732,7 @@ define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
 ; LMULMAX1-RV32-LABEL: vadd_vx_v16i64:
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    addi a4, a0, 96
-; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v8, (a4)
 ; LMULMAX1-RV32-NEXT:    addi a4, a0, 112
 ; LMULMAX1-RV32-NEXT:    vle64.v v9, (a4)
@@ -748,7 +748,6 @@ define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
 ; LMULMAX1-RV32-NEXT:    addi a0, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v15, (a0)
 ; LMULMAX1-RV32-NEXT:    li a0, 5
-; LMULMAX1-RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a0
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v16, a2

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index fae34a78360a8..7dccf2c9d7df6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -431,9 +431,8 @@ define float @vreduce_ord_fadd_v4f32(<4 x float>* %x, float %s) {
 define float @vreduce_fwadd_v4f32(<4 x half>* %x, float %s) {
 ; CHECK-LABEL: vreduce_fwadd_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
@@ -449,9 +448,8 @@ define float @vreduce_fwadd_v4f32(<4 x half>* %x, float %s) {
 define float @vreduce_ord_fwadd_v4f32(<4 x half>* %x, float %s) {
 ; CHECK-LABEL: vreduce_ord_fwadd_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
@@ -815,9 +813,8 @@ define double @vreduce_fwadd_v1f64(<1 x float>* %x, double %s) {
 define double @vreduce_ord_fwadd_v1f64(<1 x float>* %x, double %s) {
 ; CHECK-LABEL: vreduce_ord_fwadd_v1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
@@ -863,9 +860,8 @@ define double @vreduce_ord_fadd_v2f64(<2 x double>* %x, double %s) {
 define double @vreduce_fwadd_v2f64(<2 x float>* %x, double %s) {
 ; CHECK-LABEL: vreduce_fwadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
@@ -881,9 +877,8 @@ define double @vreduce_fwadd_v2f64(<2 x float>* %x, double %s) {
 define double @vreduce_ord_fwadd_v2f64(<2 x float>* %x, double %s) {
 ; CHECK-LABEL: vreduce_ord_fwadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v8, v9

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 1875213357fe6..9fe34c6634f0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -176,9 +176,8 @@ define i16 @vreduce_add_v1i16(<1 x i16>* %x) {
 define i16 @vwreduce_add_v1i16(<1 x i8>* %x) {
 ; CHECK-LABEL: vwreduce_add_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -191,9 +190,8 @@ define i16 @vwreduce_add_v1i16(<1 x i8>* %x) {
 define i16 @vwreduce_uadd_v1i16(<1 x i8>* %x) {
 ; CHECK-LABEL: vwreduce_uadd_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -326,9 +324,8 @@ define i16 @vreduce_add_v8i16(<8 x i16>* %x) {
 define i16 @vwreduce_add_v8i16(<8 x i8>* %x) {
 ; CHECK-LABEL: vwreduce_add_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vwredsum.vs v8, v8, v9
@@ -344,9 +341,8 @@ define i16 @vwreduce_add_v8i16(<8 x i8>* %x) {
 define i16 @vwreduce_uadd_v8i16(<8 x i8>* %x) {
 ; CHECK-LABEL: vwreduce_uadd_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
@@ -610,9 +606,8 @@ define i32 @vreduce_add_v1i32(<1 x i32>* %x) {
 define i32 @vwreduce_add_v1i32(<1 x i16>* %x) {
 ; CHECK-LABEL: vwreduce_add_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -625,9 +620,8 @@ define i32 @vwreduce_add_v1i32(<1 x i16>* %x) {
 define i32 @vwreduce_uadd_v1i32(<1 x i16>* %x) {
 ; CHECK-LABEL: vwreduce_uadd_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
@@ -708,9 +702,8 @@ define i32 @vreduce_add_v4i32(<4 x i32>* %x) {
 define i32 @vwreduce_add_v4i32(<4 x i16>* %x) {
 ; CHECK-LABEL: vwreduce_add_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vwredsum.vs v8, v8, v9
@@ -726,9 +719,8 @@ define i32 @vwreduce_add_v4i32(<4 x i16>* %x) {
 define i32 @vwreduce_uadd_v4i32(<4 x i16>* %x) {
 ; CHECK-LABEL: vwreduce_uadd_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
@@ -997,9 +989,8 @@ define i64 @vreduce_add_v1i64(<1 x i64>* %x) {
 define i64 @vwreduce_add_v1i64(<1 x i32>* %x) {
 ; RV32-LABEL: vwreduce_add_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v9, v8
 ; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v8, v9, a0
@@ -1009,9 +1000,8 @@ define i64 @vwreduce_add_v1i64(<1 x i32>* %x) {
 ;
 ; RV64-LABEL: vwreduce_add_v1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsext.vf2 v9, v8
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1024,9 +1014,8 @@ define i64 @vwreduce_add_v1i64(<1 x i32>* %x) {
 define i64 @vwreduce_uadd_v1i64(<1 x i32>* %x) {
 ; RV32-LABEL: vwreduce_uadd_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vzext.vf2 v9, v8
 ; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v8, v9, a0
@@ -1036,9 +1025,8 @@ define i64 @vwreduce_uadd_v1i64(<1 x i32>* %x) {
 ;
 ; RV64-LABEL: vwreduce_uadd_v1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vzext.vf2 v9, v8
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
@@ -1080,9 +1068,8 @@ define i64 @vreduce_add_v2i64(<2 x i64>* %x) {
 define i64 @vwreduce_add_v2i64(<2 x i32>* %x) {
 ; RV32-LABEL: vwreduce_add_v2i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32-NEXT:    vwredsum.vs v8, v8, v9
@@ -1096,9 +1083,8 @@ define i64 @vwreduce_add_v2i64(<2 x i32>* %x) {
 ;
 ; RV64-LABEL: vwreduce_add_v2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, zero
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwredsum.vs v8, v8, v9
@@ -1114,9 +1100,8 @@ define i64 @vwreduce_add_v2i64(<2 x i32>* %x) {
 define i64 @vwreduce_uadd_v2i64(<2 x i32>* %x) {
 ; RV32-LABEL: vwreduce_uadd_v2i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32-NEXT:    vwredsumu.vs v8, v8, v9
@@ -1130,9 +1115,8 @@ define i64 @vwreduce_uadd_v2i64(<2 x i32>* %x) {
 ;
 ; RV64-LABEL: vwreduce_uadd_v2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, zero
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwredsumu.vs v8, v8, v9

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
index c5f6769baae5e..339c0489cc324 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -391,10 +391,9 @@ define <32 x double> @vfwadd_vf_v32f32(<32 x float>* %x, float %y) {
 define <2 x float> @vfwadd_wv_v2f16(<2 x float> *%x, <2 x half> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vle16.v v10, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, <2 x float>* %x
@@ -407,10 +406,9 @@ define <2 x float> @vfwadd_wv_v2f16(<2 x float> *%x, <2 x half> *%y) {
 define <4 x float> @vfwadd_wv_v4f16(<4 x float> *%x, <4 x half> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vle16.v v10, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -423,10 +421,9 @@ define <4 x float> @vfwadd_wv_v4f16(<4 x float> *%x, <4 x half> *%y) {
 define <8 x float> @vfwadd_wv_v8f16(<8 x float> *%x, <8 x half> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v10, (a0)
 ; CHECK-NEXT:    vle16.v v12, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -439,10 +436,9 @@ define <8 x float> @vfwadd_wv_v8f16(<8 x float> *%x, <8 x half> *%y) {
 define <16 x float> @vfwadd_wv_v16f16(<16 x float> *%x, <16 x half> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v12, (a0)
 ; CHECK-NEXT:    vle16.v v16, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -456,10 +452,9 @@ define <32 x float> @vfwadd_wv_v32f16(<32 x float> *%x, <32 x half> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 32
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v24, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v24
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
@@ -472,10 +467,9 @@ define <32 x float> @vfwadd_wv_v32f16(<32 x float> *%x, <32 x half> *%y) {
 define <2 x double> @vfwadd_wv_v2f32(<2 x double> *%x, <2 x float> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v9, (a0)
 ; CHECK-NEXT:    vle32.v v10, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -488,10 +482,9 @@ define <2 x double> @vfwadd_wv_v2f32(<2 x double> *%x, <2 x float> *%y) {
 define <4 x double> @vfwadd_wv_v4f32(<4 x double> *%x, <4 x float> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v10, (a0)
 ; CHECK-NEXT:    vle32.v v12, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -504,10 +497,9 @@ define <4 x double> @vfwadd_wv_v4f32(<4 x double> *%x, <4 x float> *%y) {
 define <8 x double> @vfwadd_wv_v8f32(<8 x double> *%x, <8 x float> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v12, (a0)
 ; CHECK-NEXT:    vle32.v v16, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -520,10 +512,9 @@ define <8 x double> @vfwadd_wv_v8f32(<8 x double> *%x, <8 x float> *%y) {
 define <16 x double> @vfwadd_wv_v16f32(<16 x double> *%x, <16 x float> *%y) {
 ; CHECK-LABEL: vfwadd_wv_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vle32.v v24, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v24
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -536,9 +527,8 @@ define <16 x double> @vfwadd_wv_v16f32(<16 x double> *%x, <16 x float> *%y) {
 define <2 x float> @vfwadd_wf_v2f16(<2 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwadd_wf_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, <2 x float>* %x
@@ -552,9 +542,8 @@ define <2 x float> @vfwadd_wf_v2f16(<2 x float>* %x, half %y) {
 define <4 x float> @vfwadd_wf_v4f16(<4 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwadd_wf_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -568,9 +557,8 @@ define <4 x float> @vfwadd_wf_v4f16(<4 x float>* %x, half %y) {
 define <8 x float> @vfwadd_wf_v8f16(<8 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwadd_wf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -584,9 +572,8 @@ define <8 x float> @vfwadd_wf_v8f16(<8 x float>* %x, half %y) {
 define <16 x float> @vfwadd_wf_v16f16(<16 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwadd_wf_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -600,9 +587,8 @@ define <16 x float> @vfwadd_wf_v16f16(<16 x float>* %x, half %y) {
 define <2 x double> @vfwadd_wf_v2f32(<2 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwadd_wf_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -616,9 +602,8 @@ define <2 x double> @vfwadd_wf_v2f32(<2 x double>* %x, float %y) {
 define <4 x double> @vfwadd_wf_v4f32(<4 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwadd_wf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -632,9 +617,8 @@ define <4 x double> @vfwadd_wf_v4f32(<4 x double>* %x, float %y) {
 define <8 x double> @vfwadd_wf_v8f32(<8 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwadd_wf_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -648,9 +632,8 @@ define <8 x double> @vfwadd_wf_v8f32(<8 x double>* %x, float %y) {
 define <16 x double> @vfwadd_wf_v16f32(<16 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwadd_wf_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
index 585700b3ae46c..ea35a2e438e93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -394,10 +394,9 @@ define <32 x double> @vfwsub_vf_v32f32(<32 x float>* %x, float %y) {
 define <2 x float> @vfwsub_wv_v2f16(<2 x float> *%x, <2 x half> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vle16.v v10, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, <2 x float>* %x
@@ -410,10 +409,9 @@ define <2 x float> @vfwsub_wv_v2f16(<2 x float> *%x, <2 x half> *%y) {
 define <4 x float> @vfwsub_wv_v4f16(<4 x float> *%x, <4 x half> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vle16.v v10, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -426,10 +424,9 @@ define <4 x float> @vfwsub_wv_v4f16(<4 x float> *%x, <4 x half> *%y) {
 define <8 x float> @vfwsub_wv_v8f16(<8 x float> *%x, <8 x half> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v10, (a0)
 ; CHECK-NEXT:    vle16.v v12, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -442,10 +439,9 @@ define <8 x float> @vfwsub_wv_v8f16(<8 x float> *%x, <8 x half> *%y) {
 define <16 x float> @vfwsub_wv_v16f16(<16 x float> *%x, <16 x half> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v12, (a0)
 ; CHECK-NEXT:    vle16.v v16, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -459,10 +455,9 @@ define <32 x float> @vfwsub_wv_v32f16(<32 x float> *%x, <32 x half> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 32
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v24, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v24
 ; CHECK-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
@@ -475,10 +470,9 @@ define <32 x float> @vfwsub_wv_v32f16(<32 x float> *%x, <32 x half> *%y) {
 define <2 x double> @vfwsub_wv_v2f32(<2 x double> *%x, <2 x float> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v9, (a0)
 ; CHECK-NEXT:    vle32.v v10, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -491,10 +485,9 @@ define <2 x double> @vfwsub_wv_v2f32(<2 x double> *%x, <2 x float> *%y) {
 define <4 x double> @vfwsub_wv_v4f32(<4 x double> *%x, <4 x float> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v10, (a0)
 ; CHECK-NEXT:    vle32.v v12, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -507,10 +500,9 @@ define <4 x double> @vfwsub_wv_v4f32(<4 x double> *%x, <4 x float> *%y) {
 define <8 x double> @vfwsub_wv_v8f32(<8 x double> *%x, <8 x float> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v12, (a0)
 ; CHECK-NEXT:    vle32.v v16, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -523,10 +515,9 @@ define <8 x double> @vfwsub_wv_v8f32(<8 x double> *%x, <8 x float> *%y) {
 define <16 x double> @vfwsub_wv_v16f32(<16 x double> *%x, <16 x float> *%y) {
 ; CHECK-LABEL: vfwsub_wv_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v16, (a0)
 ; CHECK-NEXT:    vle32.v v24, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v24
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -539,9 +530,8 @@ define <16 x double> @vfwsub_wv_v16f32(<16 x double> *%x, <16 x float> *%y) {
 define <2 x float> @vfwsub_wf_v2f16(<2 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwsub_wf_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, <2 x float>* %x
@@ -555,9 +545,8 @@ define <2 x float> @vfwsub_wf_v2f16(<2 x float>* %x, half %y) {
 define <4 x float> @vfwsub_wf_v4f16(<4 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwsub_wf_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -571,9 +560,8 @@ define <4 x float> @vfwsub_wf_v4f16(<4 x float>* %x, half %y) {
 define <8 x float> @vfwsub_wf_v8f16(<8 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwsub_wf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -587,9 +575,8 @@ define <8 x float> @vfwsub_wf_v8f16(<8 x float>* %x, half %y) {
 define <16 x float> @vfwsub_wf_v16f16(<16 x float>* %x, half %y) {
 ; CHECK-LABEL: vfwsub_wf_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -603,9 +590,8 @@ define <16 x float> @vfwsub_wf_v16f16(<16 x float>* %x, half %y) {
 define <2 x double> @vfwsub_wf_v2f32(<2 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwsub_wf_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -619,9 +605,8 @@ define <2 x double> @vfwsub_wf_v2f32(<2 x double>* %x, float %y) {
 define <4 x double> @vfwsub_wf_v4f32(<4 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwsub_wf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -635,9 +620,8 @@ define <4 x double> @vfwsub_wf_v4f32(<4 x double>* %x, float %y) {
 define <8 x double> @vfwsub_wf_v8f32(<8 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwsub_wf_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -651,9 +635,8 @@ define <8 x double> @vfwsub_wf_v8f32(<8 x double>* %x, float %y) {
 define <16 x double> @vfwsub_wf_v16f32(<16 x double>* %x, float %y) {
 ; CHECK-LABEL: vfwsub_wf_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index df6924809bb7e..c0df3405d462b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -352,10 +352,9 @@ define <32 x i64> @vwadd_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
 define <2 x i32> @vwadd_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwadd_v2i32_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
 ; CHECK-NEXT:    vwadd.vv v8, v11, v10
@@ -371,10 +370,9 @@ define <2 x i32> @vwadd_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 define <4 x i32> @vwadd_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwadd_v4i32_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vwadd.vv v8, v10, v9
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index 2835726025e3c..7b94083d870b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -352,10 +352,9 @@ define <32 x i64> @vwaddu_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
 define <2 x i32> @vwaddu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwaddu_v2i32_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
 ; CHECK-NEXT:    vwaddu.vv v8, v11, v10
@@ -371,10 +370,9 @@ define <2 x i32> @vwaddu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 define <4 x i32> @vwaddu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwaddu_v4i32_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v9
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index c219c36f0d6b7..5335fbeb6499b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -355,10 +355,9 @@ define <32 x i64> @vwmul_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
 define <2 x i32> @vwmul_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v2i32_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
 ; CHECK-NEXT:    vwmul.vv v8, v11, v10
@@ -374,10 +373,9 @@ define <2 x i32> @vwmul_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 define <4 x i32> @vwmul_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmul_v4i32_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vwmul.vv v8, v10, v9
 ; CHECK-NEXT:    ret
@@ -670,10 +668,9 @@ define <8 x i16> @vwmul_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 define <8 x i16> @vwmul_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 ; CHECK-LABEL: vwmul_vx_v8i16_i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    lh a0, 0(a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmul.vx v8, v9, a0
 ; CHECK-NEXT:    ret
@@ -725,10 +722,9 @@ define <4 x i32> @vwmul_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
 define <4 x i32> @vwmul_vx_v4i32_i32(<4 x i16>* %x, i32* %y) {
 ; CHECK-LABEL: vwmul_vx_v4i32_i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lw a0, 0(a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmul.vx v8, v9, a0
 ; CHECK-NEXT:    ret
@@ -746,7 +742,7 @@ define <2 x i64> @vwmul_vx_v2i64_i8(<2 x i32>* %x, i8* %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    lb a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    srai a0, a1, 31
@@ -754,7 +750,6 @@ define <2 x i64> @vwmul_vx_v2i64_i8(<2 x i32>* %x, i8* %y) {
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
@@ -782,7 +777,7 @@ define <2 x i64> @vwmul_vx_v2i64_i16(<2 x i32>* %x, i16* %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    lh a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    srai a0, a1, 31
@@ -790,7 +785,6 @@ define <2 x i64> @vwmul_vx_v2i64_i16(<2 x i32>* %x, i16* %y) {
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
@@ -818,7 +812,7 @@ define <2 x i64> @vwmul_vx_v2i64_i32(<2 x i32>* %x, i32* %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    lw a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    srai a0, a1, 31
@@ -826,7 +820,6 @@ define <2 x i64> @vwmul_vx_v2i64_i32(<2 x i32>* %x, i32* %y) {
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
@@ -854,7 +847,7 @@ define <2 x i64> @vwmul_vx_v2i64_i64(<2 x i32>* %x, i64* %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    lw a2, 4(a1)
 ; RV32-NEXT:    lw a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
@@ -862,7 +855,6 @@ define <2 x i64> @vwmul_vx_v2i64_i64(<2 x i32>* %x, i64* %y) {
 ; RV32-NEXT:    sw a1, 8(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
@@ -870,10 +862,9 @@ define <2 x i64> @vwmul_vx_v2i64_i64(<2 x i32>* %x, i64* %y) {
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:    ld a0, 0(a1)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsext.vf2 v9, v8
 ; RV64-NEXT:    vmul.vx v8, v9, a0
 ; RV64-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index 69414143825d3..2746c8e4411b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -371,10 +371,9 @@ define <32 x i64> @vwmulsu_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
 define <2 x i32> @vwmulsu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v2i32_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle8.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
 ; CHECK-NEXT:    vwmulsu.vv v8, v11, v10
@@ -390,10 +389,9 @@ define <2 x i32> @vwmulsu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 define <4 x i32> @vwmulsu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmulsu_v4i32_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10
 ; CHECK-NEXT:    ret
@@ -703,10 +701,9 @@ define <8 x i16> @vwmulsu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 define <8 x i16> @vwmulsu_vx_v8i16_i8_swap(<8 x i8>* %x, i8* %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i16_i8_swap:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    lb a0, 0(a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmul.vx v8, v9, a0
 ; CHECK-NEXT:    ret
@@ -761,14 +758,13 @@ define <2 x i64> @vwmulsu_vx_v2i64_i8(<2 x i32>* %x, i8* %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    lbu a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    sw a1, 8(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
@@ -796,14 +792,13 @@ define <2 x i64> @vwmulsu_vx_v2i64_i16(<2 x i32>* %x, i16* %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    lhu a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    sw a1, 8(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
@@ -831,14 +826,13 @@ define <2 x i64> @vwmulsu_vx_v2i64_i32(<2 x i32>* %x, i32* %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    lw a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    sw a1, 8(sp)
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsext.vf2 v10, v8
 ; RV32-NEXT:    vmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index fd05f10311921..15b31f67fe266 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -355,10 +355,9 @@ define <32 x i64> @vwmulu_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
 define <2 x i32> @vwmulu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v2i32_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
 ; CHECK-NEXT:    vwmulu.vv v8, v11, v10
@@ -374,10 +373,9 @@ define <2 x i32> @vwmulu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 define <4 x i32> @vwmulu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmulu_v4i32_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v9
 ; CHECK-NEXT:    ret
@@ -670,10 +668,9 @@ define <8 x i16> @vwmulu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 define <8 x i16> @vwmulu_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 ; CHECK-LABEL: vwmulu_vx_v8i16_i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    lh a0, 0(a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmul.vx v8, v9, a0
 ; CHECK-NEXT:    ret
@@ -725,10 +722,9 @@ define <4 x i32> @vwmulu_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
 define <4 x i32> @vwmulu_vx_v4i32_i32(<4 x i16>* %x, i32* %y) {
 ; CHECK-LABEL: vwmulu_vx_v4i32_i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lw a0, 0(a1)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmul.vx v8, v9, a0
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
index fc6a9377ed2d4..8ed07f041cbc0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -352,10 +352,9 @@ define <32 x i64> @vwsub_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
 define <2 x i32> @vwsub_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwsub_v2i32_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
 ; CHECK-NEXT:    vwsub.vv v8, v11, v10
@@ -371,10 +370,9 @@ define <2 x i32> @vwsub_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 define <4 x i32> @vwsub_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwsub_v4i32_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vwsub.vv v8, v10, v9
 ; CHECK-NEXT:    ret
@@ -649,10 +647,9 @@ define <16 x i64> @vwsub_vx_v16i64(<16 x i32>* %x, i32 %y) {
 define <8 x i16> @vwsub_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 ; CHECK-LABEL: vwsub_vx_v8i16_i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    lb a1, 0(a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v9
@@ -687,10 +684,9 @@ define <8 x i16> @vwsub_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 define <4 x i32> @vwsub_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
 ; CHECK-LABEL: vwsub_vx_v4i32_i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    lb a1, 0(a1)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v9
@@ -708,10 +704,9 @@ define <4 x i32> @vwsub_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
 define <4 x i32> @vwsub_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
 ; CHECK-LABEL: vwsub_vx_v4i32_i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    lh a1, 0(a1)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v9
@@ -761,10 +756,9 @@ define <2 x i64> @vwsub_vx_v2i64_i8(<2 x i32>* %x, i8* %y) nounwind {
 ;
 ; RV64-LABEL: vwsub_vx_v2i64_i8:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    lb a1, 0(a1)
 ; RV64-NEXT:    vle32.v v9, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a1
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwsub.wv v8, v10, v9
@@ -797,10 +791,9 @@ define <2 x i64> @vwsub_vx_v2i64_i16(<2 x i32>* %x, i16* %y) nounwind {
 ;
 ; RV64-LABEL: vwsub_vx_v2i64_i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    lh a1, 0(a1)
 ; RV64-NEXT:    vle32.v v9, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a1
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwsub.wv v8, v10, v9
@@ -833,10 +826,9 @@ define <2 x i64> @vwsub_vx_v2i64_i32(<2 x i32>* %x, i32* %y) nounwind {
 ;
 ; RV64-LABEL: vwsub_vx_v2i64_i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    lw a1, 0(a1)
 ; RV64-NEXT:    vle32.v v9, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a1
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwsub.wv v8, v10, v9

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index c7bcaca598f85..2e0ba4885ee62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -352,10 +352,9 @@ define <32 x i64> @vwsubu_v32i64(<32 x i32>* %x, <32 x i32>* %y) nounwind {
 define <2 x i32> @vwsubu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwsubu_v2i32_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
 ; CHECK-NEXT:    vwsubu.vv v8, v11, v10
@@ -371,10 +370,9 @@ define <2 x i32> @vwsubu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 define <4 x i32> @vwsubu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwsubu_v4i32_v4i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v9, (a1)
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v9
 ; CHECK-NEXT:    ret
@@ -649,10 +647,9 @@ define <16 x i64> @vwsubu_vx_v16i64(<16 x i32>* %x, i32 %y) {
 define <8 x i16> @vwsubu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 ; CHECK-LABEL: vwsubu_vx_v8i16_i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    lbu a1, 0(a1)
 ; CHECK-NEXT:    vle8.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v9
@@ -687,10 +684,9 @@ define <8 x i16> @vwsubu_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 define <4 x i32> @vwsubu_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
 ; CHECK-LABEL: vwsubu_vx_v4i32_i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    lbu a1, 0(a1)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v9
@@ -708,10 +704,9 @@ define <4 x i32> @vwsubu_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
 define <4 x i32> @vwsubu_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
 ; CHECK-LABEL: vwsubu_vx_v4i32_i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    lhu a1, 0(a1)
 ; CHECK-NEXT:    vle16.v v9, (a0)
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v9
@@ -760,10 +755,9 @@ define <2 x i64> @vwsubu_vx_v2i64_i8(<2 x i32>* %x, i8* %y) nounwind {
 ;
 ; RV64-LABEL: vwsubu_vx_v2i64_i8:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    lbu a1, 0(a1)
 ; RV64-NEXT:    vle32.v v9, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a1
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwsubu.wv v8, v10, v9
@@ -795,10 +789,9 @@ define <2 x i64> @vwsubu_vx_v2i64_i16(<2 x i32>* %x, i16* %y) nounwind {
 ;
 ; RV64-LABEL: vwsubu_vx_v2i64_i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    lhu a1, 0(a1)
 ; RV64-NEXT:    vle32.v v9, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a1
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwsubu.wv v8, v10, v9
@@ -830,10 +823,9 @@ define <2 x i64> @vwsubu_vx_v2i64_i32(<2 x i32>* %x, i32* %y) nounwind {
 ;
 ; RV64-LABEL: vwsubu_vx_v2i64_i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    lwu a1, 0(a1)
 ; RV64-NEXT:    vle32.v v9, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a1
 ; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vwsubu.wv v8, v10, v9

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index 6556a253611d9..6fc37f7cfaf9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -194,9 +194,8 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
     ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY %3
     ; CHECK-NEXT: PseudoRET implicit $v8

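A note for readers skimming the test churn: every rewritten vsetivli/vsetvli above preserves the SEW/LMUL ratio of the configuration it replaces, which is the only VTYPE property a load or store with opcode-encoded EEW (vle8.v, vle32.v, vlse64.v, ...) actually demands, since that ratio is what fixes VLMAX (VLMAX = VLEN * LMUL / SEW). The following standalone sketch — hypothetical helper names, not the pass's real data structures — decodes the two VTYPE immediates from the vsetvli-insert.mir hunk and checks that invariant:

// Standalone sketch, not the InsertVSETVLI API. Decodes the VTYPE
// immediate layout used by vsetvli (bits [2:0] vlmul, [5:3] vsew,
// bit 6 vta, bit 7 vma) and compares SEW/LMUL ratios.
#include <cassert>
#include <cstdio>

struct VType {
  unsigned SEW;      // 8, 16, 32, 64
  unsigned LMULNum;  // LMUL as a rational LMULNum/LMULDen
  unsigned LMULDen;
  bool TailAgnostic;
  bool MaskAgnostic;
};

static VType decodeVType(unsigned Imm) {
  unsigned VLMul = Imm & 7;  // 0..3 -> m1..m8; 5,6,7 -> mf8,mf4,mf2
  VType V;                   // (vlmul = 4 is reserved)
  V.SEW = 8u << ((Imm >> 3) & 7);
  V.LMULNum = VLMul < 4 ? 1u << VLMul : 1;
  V.LMULDen = VLMul < 4 ? 1 : 1u << (8 - VLMul);
  V.TailAgnostic = (Imm >> 6) & 1;
  V.MaskAgnostic = (Imm >> 7) & 1;
  return V;
}

// Scaling by the LMUL denominator keeps the ratio integral.
static unsigned getSEWLMULRatio(const VType &V) {
  return V.SEW * V.LMULDen / V.LMULNum;
}

int main() {
  // The two immediates from the vsetvli-insert.mir hunk above.
  VType Old = decodeVType(87);  // e32, mf2, ta, mu
  VType New = decodeVType(88);  // e64, m1, ta, mu
  // Same ratio (64), so hoisting the e64/m1 config over the
  // PseudoVLE32_V_MF2 cannot change its VL or element count, and
  // the second vsetvli becomes dead.
  assert(getSEWLMULRatio(Old) == getSEWLMULRatio(New));
  printf("ratio = %u\n", getSEWLMULRatio(Old));
  return 0;
}

The same arithmetic covers the .ll diffs: (e8, mf8) -> (e16, mf4) and (e32, mf2) -> (e64, m1) both hold the ratio at 64, (e8, mf4) -> (e16, mf2) and (e16, mf2) -> (e32, m1) hold it at 32, and (e8, mf2) -> (e16, m1) holds it at 16.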