[llvm] 95c6824 - [RISCV] Teach CleanupVSETVLI to remove 'vsetvli zero, zero, vtype' when the vtype matches the previous vsetvli or vsetivli

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 25 07:51:29 PST 2021


Author: Craig Topper
Date: 2021-02-25T07:51:19-08:00
New Revision: 95c68249952803330739b7311dd2bdc7b18e272f

URL: https://github.com/llvm/llvm-project/commit/95c68249952803330739b7311dd2bdc7b18e272f
DIFF: https://github.com/llvm/llvm-project/commit/95c68249952803330739b7311dd2bdc7b18e272f.diff

LOG: [RISCV] Teach CleanupVSETVLI to remove 'vsetvli zero, zero, vtype' when the vtype matches the previous vsetvli or vsetivli

Reviewed By: frasercrmck, arcbbb

Differential Revision: https://reviews.llvm.org/D97408
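
For illustration, here is the pattern this removes, taken from the
extractelt tests updated below. Before this patch:

    vsetivli a0, 1, e16,mf4,ta,mu
    vslidedown.vi v25, v8, 2
    vsetvli zero, zero, e16,mf4,ta,mu   # same VTYPE, and VL is unchanged
    vfmv.f.s fa0, v25

The middle 'vsetvli zero, zero, ...' neither changes VL nor VTYPE relative
to the preceding vsetivli, so it is now deleted:

    vsetivli a0, 1, e16,mf4,ta,mu
    vslidedown.vi v25, v8, 2
    vfmv.f.s fa0, v25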

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
index ae32cbd1ae59e..426860f63e1b3 100644
--- a/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
@@ -52,6 +52,56 @@ char RISCVCleanupVSETVLI::ID = 0;
 INITIALIZE_PASS(RISCVCleanupVSETVLI, DEBUG_TYPE,
                 RISCV_CLEANUP_VSETVLI_NAME, false, false)
 
+static bool isRedundantVSETVLI(MachineInstr &MI, MachineInstr *PrevVSETVLI) {
+  // If we don't have a previous VSET{I}VLI or the VL output isn't dead, we
+  // can't remove this VSETVLI.
+  if (!PrevVSETVLI || !MI.getOperand(0).isDead())
+    return false;
+
+  // Does this VSET{I}VLI use the same VTYPE immediate?
+  int64_t PrevVTYPEImm = PrevVSETVLI->getOperand(2).getImm();
+  int64_t VTYPEImm = MI.getOperand(2).getImm();
+  if (PrevVTYPEImm != VTYPEImm)
+    return false;
+
+  if (MI.getOpcode() == RISCV::PseudoVSETIVLI) {
+    // If the previous opcode wasn't vsetivli we can't compare them.
+    if (PrevVSETVLI->getOpcode() != RISCV::PseudoVSETIVLI)
+      return false;
+
+    // For VSETIVLI, we can just compare the immediates.
+    return PrevVSETVLI->getOperand(1).getImm() == MI.getOperand(1).getImm();
+  }
+
+  assert(MI.getOpcode() == RISCV::PseudoVSETVLI);
+  Register AVLReg = MI.getOperand(1).getReg();
+
+  // If this VSETVLI isn't changing VL, it is redundant.
+  if (AVLReg == RISCV::X0 && MI.getOperand(0).getReg() == RISCV::X0)
+    return true;
+
+  // If the previous opcode isn't vsetvli we can't do any more comparison.
+  if (PrevVSETVLI->getOpcode() != RISCV::PseudoVSETVLI)
+    return false;
+
+  // Does this VSETVLI use the same AVL register?
+  if (AVLReg != PrevVSETVLI->getOperand(1).getReg())
+    return false;
+
+  // If the AVLReg is X0 we must be setting VL to VLMAX. Keeping VL unchanged
+  // was handled above.
+  if (AVLReg == RISCV::X0) {
+    // This instruction is setting VL to VLMAX; this is redundant if the
+    // previous VSETVLI was also setting VL to VLMAX. But it is not redundant
+    // if they were setting it to any other value or leaving VL unchanged.
+    Register PrevOutVL = PrevVSETVLI->getOperand(0).getReg();
+    return PrevOutVL != RISCV::X0;
+  }
+
+  // This vsetvli is redundant.
+  return true;
+}
+
 bool RISCVCleanupVSETVLI::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
   bool Changed = false;
   MachineInstr *PrevVSETVLI = nullptr;
@@ -70,62 +120,14 @@ bool RISCVCleanupVSETVLI::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
       continue;
     }
 
-    // If we don't have a previous VSET{I}VLI or the VL output isn't dead, we
-    // can't remove this VSETVLI.
-    if (!PrevVSETVLI || !MI.getOperand(0).isDead()) {
-      PrevVSETVLI = &MI;
-      continue;
-    }
-
-    // If a previous "set vl" instruction opcode is different from this one, we
-    // can't differentiate the AVL values.
-    if (PrevVSETVLI->getOpcode() != MI.getOpcode()) {
-      PrevVSETVLI = &MI;
-      continue;
-    }
-
-    // The remaining two cases are
-    // 1. PrevVSETVLI = PseudoVSETVLI
-    //    MI = PseudoVSETVLI
-    //
-    // 2. PrevVSETVLI = PseudoVSETIVLI
-    //    MI = PseudoVSETIVLI
-    Register AVLReg;
-    bool SameAVL = false;
-    if (MI.getOpcode() == RISCV::PseudoVSETVLI) {
-      AVLReg = MI.getOperand(1).getReg();
-      SameAVL = PrevVSETVLI->getOperand(1).getReg() == AVLReg;
-    } else { // RISCV::PseudoVSETIVLI
-      SameAVL =
-          PrevVSETVLI->getOperand(1).getImm() == MI.getOperand(1).getImm();
-    }
-    int64_t PrevVTYPEImm = PrevVSETVLI->getOperand(2).getImm();
-    int64_t VTYPEImm = MI.getOperand(2).getImm();
-
-    // Does this VSET{I}VLI use the same AVL register/value and VTYPE immediate?
-    if (!SameAVL || PrevVTYPEImm != VTYPEImm) {
+    if (isRedundantVSETVLI(MI, PrevVSETVLI)) {
+      // This VSETVLI is redundant, remove it.
+      MI.eraseFromParent();
+      Changed = true;
+    } else {
+      // Otherwise, remember this VSET{I}VLI for the next iteration.
       PrevVSETVLI = &MI;
-      continue;
     }
-
-    // If the AVLReg is X0 we need to look at the output VL of both VSETVLIs.
-    if ((MI.getOpcode() == RISCV::PseudoVSETVLI) && (AVLReg == RISCV::X0)) {
-      assert((PrevVSETVLI->getOpcode() == RISCV::PseudoVSETVLI) &&
-             "Unexpected vsetvli opcode.");
-      Register PrevOutVL = PrevVSETVLI->getOperand(0).getReg();
-      Register OutVL = MI.getOperand(0).getReg();
-      // We can't remove if the previous VSETVLI left VL unchanged and the
-      // current instruction is setting it to VLMAX. Without knowing the VL
-      // before the previous instruction we don't know if this is a change.
-      if (PrevOutVL == RISCV::X0 && OutVL != RISCV::X0) {
-        PrevVSETVLI = &MI;
-        continue;
-      }
-    }
-
-    // This VSETVLI is redundant, remove it.
-    MI.eraseFromParent();
-    Changed = true;
   }
 
   return Changed;

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
index c65c07ad8de32..da2eb2dca04e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
@@ -17,7 +17,6 @@ define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x half> %v, i32 2
@@ -29,7 +28,6 @@ define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x half> %v, i32 %idx
@@ -51,7 +49,6 @@ define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x half> %v, i32 2
@@ -63,7 +60,6 @@ define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x half> %v, i32 %idx
@@ -85,7 +81,6 @@ define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x half> %v, i32 2
@@ -97,7 +92,6 @@ define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x half> %v, i32 %idx
@@ -119,7 +113,6 @@ define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x half> %v, i32 2
@@ -131,7 +124,6 @@ define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x half> %v, i32 %idx
@@ -153,7 +145,6 @@ define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x half> %v, i32 2
@@ -165,7 +156,6 @@ define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x half> %v, i32 %idx
@@ -187,7 +177,6 @@ define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x half> %v, i32 2
@@ -199,7 +188,6 @@ define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x half> %v, i32 %idx
@@ -221,7 +209,6 @@ define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x float> %v, i32 2
@@ -233,7 +220,6 @@ define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x float> %v, i32 %idx
@@ -255,7 +241,6 @@ define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x float> %v, i32 2
@@ -267,7 +252,6 @@ define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x float> %v, i32 %idx
@@ -289,7 +273,6 @@ define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x float> %v, i32 2
@@ -301,7 +284,6 @@ define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x float> %v, i32 %idx
@@ -323,7 +305,6 @@ define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x float> %v, i32 2
@@ -335,7 +316,6 @@ define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x float> %v, i32 %idx
@@ -357,7 +337,6 @@ define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x float> %v, i32 2
@@ -369,7 +348,6 @@ define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x float> %v, i32 %idx
@@ -391,7 +369,6 @@ define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x double> %v, i32 2
@@ -403,7 +380,6 @@ define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x double> %v, i32 %idx
@@ -425,7 +401,6 @@ define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x double> %v, i32 2
@@ -437,7 +412,6 @@ define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x double> %v, i32 %idx
@@ -459,7 +433,6 @@ define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x double> %v, i32 2
@@ -471,7 +444,6 @@ define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x double> %v, i32 %idx
@@ -493,7 +465,6 @@ define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x double> %v, i32 2
@@ -505,7 +476,6 @@ define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x double> %v, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
index 0245626c630be..61a77cf02189b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
@@ -17,7 +17,6 @@ define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x half> %v, i32 2
@@ -29,7 +28,6 @@ define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x half> %v, i32 %idx
@@ -51,7 +49,6 @@ define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x half> %v, i32 2
@@ -63,7 +60,6 @@ define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x half> %v, i32 %idx
@@ -85,7 +81,6 @@ define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x half> %v, i32 2
@@ -97,7 +92,6 @@ define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x half> %v, i32 %idx
@@ -119,7 +113,6 @@ define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x half> %v, i32 2
@@ -131,7 +124,6 @@ define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x half> %v, i32 %idx
@@ -153,7 +145,6 @@ define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x half> %v, i32 2
@@ -165,7 +156,6 @@ define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x half> %v, i32 %idx
@@ -187,7 +177,6 @@ define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x half> %v, i32 2
@@ -199,7 +188,6 @@ define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x half> %v, i32 %idx
@@ -221,7 +209,6 @@ define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x float> %v, i32 2
@@ -233,7 +220,6 @@ define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x float> %v, i32 %idx
@@ -255,7 +241,6 @@ define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x float> %v, i32 2
@@ -267,7 +252,6 @@ define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x float> %v, i32 %idx
@@ -289,7 +273,6 @@ define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x float> %v, i32 2
@@ -301,7 +284,6 @@ define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x float> %v, i32 %idx
@@ -323,7 +305,6 @@ define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x float> %v, i32 2
@@ -335,7 +316,6 @@ define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x float> %v, i32 %idx
@@ -357,7 +337,6 @@ define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x float> %v, i32 2
@@ -369,7 +348,6 @@ define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 signext %idx
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x float> %v, i32 %idx
@@ -391,7 +369,6 @@ define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x double> %v, i32 2
@@ -403,7 +380,6 @@ define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 signext %idx
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x double> %v, i32 %idx
@@ -425,7 +401,6 @@ define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x double> %v, i32 2
@@ -437,7 +412,6 @@ define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 signext %idx
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x double> %v, i32 %idx
@@ -459,7 +433,6 @@ define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x double> %v, i32 2
@@ -471,7 +444,6 @@ define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 signext %idx
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x double> %v, i32 %idx
@@ -493,7 +465,6 @@ define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x double> %v, i32 2
@@ -505,7 +476,6 @@ define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 signext %idx
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x double> %v, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index b75c83a1575c3..80c6b046c6e40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -17,7 +17,6 @@ define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i8> %v, i32 2
@@ -29,7 +28,6 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i8> %v, i32 %idx
@@ -51,7 +49,6 @@ define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i8> %v, i32 2
@@ -63,7 +60,6 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i8> %v, i32 %idx
@@ -85,7 +81,6 @@ define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i8> %v, i32 2
@@ -97,7 +92,6 @@ define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i8> %v, i32 %idx
@@ -119,7 +113,6 @@ define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i8> %v, i32 2
@@ -131,7 +124,6 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i8> %v, i32 %idx
@@ -153,7 +145,6 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i8> %v, i32 2
@@ -165,7 +156,6 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i8> %v, i32 %idx
@@ -187,7 +177,6 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i8> %v, i32 2
@@ -199,7 +188,6 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i8> %v, i32 %idx
@@ -221,7 +209,6 @@ define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 64 x i8> %v, i32 2
@@ -233,7 +220,6 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 64 x i8> %v, i32 %idx
@@ -255,7 +241,6 @@ define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i16> %v, i32 2
@@ -267,7 +252,6 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i16> %v, i32 %idx
@@ -289,7 +273,6 @@ define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i16> %v, i32 2
@@ -301,7 +284,6 @@ define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i16> %v, i32 %idx
@@ -323,7 +305,6 @@ define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i16> %v, i32 2
@@ -335,7 +316,6 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i16> %v, i32 %idx
@@ -357,7 +337,6 @@ define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i16> %v, i32 2
@@ -369,7 +348,6 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i16> %v, i32 %idx
@@ -391,7 +369,6 @@ define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i16> %v, i32 2
@@ -403,7 +380,6 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i16> %v, i32 %idx
@@ -425,7 +401,6 @@ define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i16> %v, i32 2
@@ -437,7 +412,6 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i16> %v, i32 %idx
@@ -459,7 +433,6 @@ define i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i32> %v, i32 2
@@ -471,7 +444,6 @@ define i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i32> %v, i32 %idx
@@ -493,7 +465,6 @@ define i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i32> %v, i32 2
@@ -505,7 +476,6 @@ define i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i32> %v, i32 %idx
@@ -527,7 +497,6 @@ define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i32> %v, i32 2
@@ -539,7 +508,6 @@ define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i32> %v, i32 %idx
@@ -561,7 +529,6 @@ define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i32> %v, i32 2
@@ -573,7 +540,6 @@ define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i32> %v, i32 %idx
@@ -595,7 +561,6 @@ define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i32> %v, i32 2
@@ -607,7 +572,6 @@ define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i32> %v, i32 %idx
@@ -620,7 +584,6 @@ define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    addi a0, zero, 32
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -633,12 +596,9 @@ define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i64> %v, i32 2
@@ -650,12 +610,9 @@ define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i64> %v, i32 %idx
@@ -668,7 +625,6 @@ define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    addi a0, zero, 32
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v26
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -681,12 +637,9 @@ define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v26, v26, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i64> %v, i32 2
@@ -698,12 +651,9 @@ define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v26, v26, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i64> %v, i32 %idx
@@ -716,7 +666,6 @@ define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    addi a0, zero, 32
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v28
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -729,12 +678,9 @@ define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v28, v28, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i64> %v, i32 2
@@ -746,12 +692,9 @@ define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v28, v28, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i64> %v, i32 %idx
@@ -764,7 +707,6 @@ define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    addi a0, zero, 32
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v16, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v16
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -777,12 +719,9 @@ define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i64> %v, i32 2
@@ -794,12 +733,9 @@ define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetivli a2, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i64> %v, i32 %idx

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index 9a34a7e8aec1f..05c98d9927ff9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -17,7 +17,6 @@ define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i8> %v, i32 2
@@ -29,7 +28,6 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i8> %v, i32 %idx
@@ -51,7 +49,6 @@ define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i8> %v, i32 2
@@ -63,7 +60,6 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i8> %v, i32 %idx
@@ -85,7 +81,6 @@ define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i8> %v, i32 2
@@ -97,7 +92,6 @@ define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i8> %v, i32 %idx
@@ -119,7 +113,6 @@ define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i8> %v, i32 2
@@ -131,7 +124,6 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 signext %idx)
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i8> %v, i32 %idx
@@ -153,7 +145,6 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i8> %v, i32 2
@@ -165,7 +156,6 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 signext %id
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i8> %v, i32 %idx
@@ -187,7 +177,6 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i8> %v, i32 2
@@ -199,7 +188,6 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 signext %id
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i8> %v, i32 %idx
@@ -221,7 +209,6 @@ define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 64 x i8> %v, i32 2
@@ -233,7 +220,6 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 signext %id
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 64 x i8> %v, i32 %idx
@@ -255,7 +241,6 @@ define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i16> %v, i32 2
@@ -267,7 +252,6 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i16> %v, i32 %idx
@@ -289,7 +273,6 @@ define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i16> %v, i32 2
@@ -301,7 +284,6 @@ define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i16> %v, i32 %idx
@@ -323,7 +305,6 @@ define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i16> %v, i32 2
@@ -335,7 +316,6 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i16> %v, i32 %idx
@@ -357,7 +337,6 @@ define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i16> %v, i32 2
@@ -369,7 +348,6 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i16> %v, i32 %idx
@@ -391,7 +369,6 @@ define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i16> %v, i32 2
@@ -403,7 +380,6 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i16> %v, i32 %idx
@@ -425,7 +401,6 @@ define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i16> %v, i32 2
@@ -437,7 +412,6 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i16> %v, i32 %idx
@@ -459,7 +433,6 @@ define signext i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i32> %v, i32 2
@@ -471,7 +444,6 @@ define signext i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i32> %v, i32 %idx
@@ -493,7 +465,6 @@ define signext i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i32> %v, i32 2
@@ -505,7 +476,6 @@ define signext i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i32> %v, i32 %idx
@@ -527,7 +497,6 @@ define signext i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i32> %v, i32 2
@@ -539,7 +508,6 @@ define signext i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i32> %v, i32 %idx
@@ -561,7 +529,6 @@ define signext i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i32> %v, i32 2
@@ -573,7 +540,6 @@ define signext i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %i
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i32> %v, i32 %idx
@@ -595,7 +561,6 @@ define signext i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i32> %v, i32 2
@@ -607,7 +572,6 @@ define signext i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i32> %v, i32 %idx
@@ -629,7 +593,6 @@ define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i64> %v, i32 2
@@ -641,7 +604,6 @@ define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 1 x i64> %v, i32 %idx
@@ -663,7 +625,6 @@ define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i64> %v, i32 2
@@ -675,7 +636,6 @@ define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i64> %v, i32 %idx
@@ -697,7 +657,6 @@ define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i64> %v, i32 2
@@ -709,7 +668,6 @@ define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i64> %v, i32 %idx
@@ -731,7 +689,6 @@ define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i64> %v, i32 2
@@ -743,7 +700,6 @@ define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 signext %idx) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i64> %v, i32 %idx

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index ba325abb32dca..fb4fbe0de39fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -9,7 +9,6 @@ define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind {
 ; RV32-NEXT:    vle8.v v25, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; RV32-NEXT:    vslidedown.vi v25, v25, 7
-; RV32-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
 ; RV32-NEXT:    ret
 ;
@@ -19,7 +18,6 @@ define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind {
 ; RV64-NEXT:    vle8.v v25, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; RV64-NEXT:    vslidedown.vi v25, v25, 7
-; RV64-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
 ; RV64-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
@@ -34,7 +32,6 @@ define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind {
 ; RV32-NEXT:    vle16.v v25, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; RV32-NEXT:    vslidedown.vi v25, v25, 7
-; RV32-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
 ; RV32-NEXT:    ret
 ;
@@ -44,7 +41,6 @@ define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind {
 ; RV64-NEXT:    vle16.v v25, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; RV64-NEXT:    vslidedown.vi v25, v25, 7
-; RV64-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
 ; RV64-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
@@ -59,7 +55,6 @@ define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind {
 ; RV32-NEXT:    vle32.v v25, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; RV32-NEXT:    vslidedown.vi v25, v25, 2
-; RV32-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
 ; RV32-NEXT:    ret
 ;
@@ -69,7 +64,6 @@ define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind {
 ; RV64-NEXT:    vle32.v v25, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; RV64-NEXT:    vslidedown.vi v25, v25, 2
-; RV64-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
 ; RV64-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
@@ -82,12 +76,10 @@ define i64 @extractelt_v2i64(<2 x i64>* %x) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; RV32-NEXT:    vle64.v v25, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
 ; RV32-NEXT:    addi a1, zero, 32
 ; RV32-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; RV32-NEXT:    vsrl.vx v25, v25, a1
-; RV32-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v25
 ; RV32-NEXT:    ret
 ;
@@ -95,7 +87,6 @@ define i64 @extractelt_v2i64(<2 x i64>* %x) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; RV64-NEXT:    vle64.v v25, (a0)
-; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
 ; RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -111,7 +102,6 @@ define i8 @extractelt_v32i8(<32 x i8>* %x) nounwind {
 ; RV32-NEXT:    vle8.v v26, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 7
-; RV32-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
 ; RV32-NEXT:    ret
 ;
@@ -122,7 +112,6 @@ define i8 @extractelt_v32i8(<32 x i8>* %x) nounwind {
 ; RV64-NEXT:    vle8.v v26, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 7
-; RV64-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
 ; RV64-NEXT:    ret
   %a = load <32 x i8>, <32 x i8>* %x
@@ -137,7 +126,6 @@ define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind {
 ; RV32-NEXT:    vle16.v v26, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 7
-; RV32-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
 ; RV32-NEXT:    ret
 ;
@@ -147,7 +135,6 @@ define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind {
 ; RV64-NEXT:    vle16.v v26, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 7
-; RV64-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
 ; RV64-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %x
@@ -162,7 +149,6 @@ define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind {
 ; RV32-NEXT:    vle32.v v26, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 6
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
 ; RV32-NEXT:    ret
 ;
@@ -172,7 +158,6 @@ define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind {
 ; RV64-NEXT:    vle32.v v26, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 6
-; RV64-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
 ; RV64-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
@@ -187,12 +172,9 @@ define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
 ; RV32-NEXT:    vle64.v v26, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 3
-; RV32-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
 ; RV32-NEXT:    addi a1, zero, 32
-; RV32-NEXT:    vsetivli a2, 1, e64,m2,ta,mu
 ; RV32-NEXT:    vsrl.vx v26, v26, a1
-; RV32-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v26
 ; RV32-NEXT:    ret
 ;
@@ -202,7 +184,6 @@ define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
 ; RV64-NEXT:    vle64.v v26, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 3
-; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
 ; RV64-NEXT:    ret
   %a = load <4 x i64>, <4 x i64>* %x
@@ -221,11 +202,8 @@ define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
 ; RV32-NEXT:    vle32.v v26, (a0)
 ; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 4
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v28
-; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 5
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v26
 ; RV32-NEXT:    ret
 ;
@@ -235,7 +213,6 @@ define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
 ; RV64-NEXT:    vle64.v v26, (a0)
 ; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 2
-; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
 ; RV64-NEXT:    ret
   %a = load <3 x i64>, <3 x i64>* %x

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 3d329c75ab388..20ecec0b85576 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -21,44 +21,30 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV32-NEXT:    sw a1, 64(sp)
 ; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT:    vle32.v v26, (a0)
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v26
 ; RV32-NEXT:    sw a1, 0(sp)
-; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT:    addi a1, sp, 32
 ; RV32-NEXT:    vle32.v v28, (a1)
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v28
 ; RV32-NEXT:    sw a1, 28(sp)
-; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT:    addi a1, sp, 64
 ; RV32-NEXT:    vle32.v v28, (a1)
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v28
 ; RV32-NEXT:    sw a1, 24(sp)
 ; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 5
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v28
 ; RV32-NEXT:    sw a1, 20(sp)
-; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 4
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v28
 ; RV32-NEXT:    sw a1, 16(sp)
-; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 3
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v28
 ; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 2
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v28
 ; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 1
-; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v26
 ; RV32-NEXT:    sw a1, 4(sp)
 ; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
@@ -84,23 +70,17 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV64-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; RV64-NEXT:    vle64.v v26, (a0)
 ; RV64-NEXT:    sd a1, 32(sp)
-; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a1, v26
 ; RV64-NEXT:    sd a1, 0(sp)
-; RV64-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; RV64-NEXT:    addi a1, sp, 32
 ; RV64-NEXT:    vle64.v v28, (a1)
-; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a1, v28
 ; RV64-NEXT:    sd a1, 24(sp)
 ; RV64-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v28, v26, 2
-; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a1, v28
 ; RV64-NEXT:    sd a1, 16(sp)
-; RV64-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 1
-; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a1, v26
 ; RV64-NEXT:    sd a1, 8(sp)
 ; RV64-NEXT:    vsetivli a1, 4, e64,m2,ta,mu

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
index f9adf118a4678..2e04b90853dd5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
@@ -1157,7 +1157,6 @@ define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %v)
@@ -1176,7 +1175,6 @@ define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64> %v)
@@ -1201,7 +1199,6 @@ define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64> %v)
@@ -1220,7 +1217,6 @@ define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64> %v)
@@ -1246,7 +1242,6 @@ define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64> %v)
@@ -1265,7 +1260,6 @@ define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.and.nxv1i64(<vscale x 1 x i64> %v)
@@ -1284,7 +1278,6 @@ define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.or.nxv1i64(<vscale x 1 x i64> %v)
@@ -1303,7 +1296,6 @@ define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.xor.nxv1i64(<vscale x 1 x i64> %v)
@@ -1324,7 +1316,6 @@ define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %v)
@@ -1345,7 +1336,6 @@ define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %v)
@@ -1372,7 +1362,6 @@ define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %v)
@@ -1393,7 +1382,6 @@ define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %v)
@@ -1421,7 +1409,6 @@ define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %v)
@@ -1442,7 +1429,6 @@ define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> %v)
@@ -1463,7 +1449,6 @@ define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> %v)
@@ -1484,7 +1469,6 @@ define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64> %v)
@@ -1505,7 +1489,6 @@ define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %v)
@@ -1526,7 +1509,6 @@ define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64> %v)
@@ -1553,7 +1535,6 @@ define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64> %v)
@@ -1574,7 +1555,6 @@ define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64> %v)
@@ -1602,7 +1582,6 @@ define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64> %v)
@@ -1623,7 +1602,6 @@ define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.and.nxv4i64(<vscale x 4 x i64> %v)
@@ -1644,7 +1622,6 @@ define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.or.nxv4i64(<vscale x 4 x i64> %v)
@@ -1665,7 +1642,6 @@ define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    addi a1, zero, 32
 ; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64> %v)
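
All of the hunks above drop the same two shapes of instruction: a
'vsetvli zero, zero, vtype' whose VL output is dead and whose vtype
immediate matches the preceding vsetvli/vsetivli, and (in a few of the
fixed-vector tests) a repeated 'vsetivli' with identical AVL and vtype
immediates. As a hand-written sketch of the effect, not generated
compiler output, the extract-element lowering goes from:

  # before cleanup
  vsetivli a0, 1, e8,m1,ta,mu        # sets vl=1, vtype=e8,m1,ta,mu
  vslidedown.vi v25, v8, 2
  vsetvli zero, zero, e8,m1,ta,mu    # same vtype, vl unchanged -> redundant
  vmv.x.s a0, v25

to:

  # after cleanup
  vsetivli a0, 1, e8,m1,ta,mu
  vslidedown.vi v25, v8, 2
  vmv.x.s a0, v25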

