[llvm] efcdd59 - [RISCV] Teach VSETVLI inserter to use VSETIVLI when possible.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 24 16:17:36 PST 2021


Author: Craig Topper
Date: 2021-02-24T16:07:33-08:00
New Revision: efcdd598b766e764a7efb48b49e9ec8b0a590510

URL: https://github.com/llvm/llvm-project/commit/efcdd598b766e764a7efb48b49e9ec8b0a590510
DIFF: https://github.com/llvm/llvm-project/commit/efcdd598b766e764a7efb48b49e9ec8b0a590510.diff

LOG: [RISCV] Teach VSETVLI inserter to use VSETIVLI when possible.

We always create the VL operand as a register, but if we can
determine that it came from an ADDI X0, imm with a sufficiently
small immediate, we can use VSETIVLI instead.
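
For illustration, a minimal before/after sketch drawn from the test
updates below (the destination register and vtype arguments vary per
test):

  Before:
    addi    a0, zero, 1
    vsetvli a0, a0, e16,mf4,ta,mu

  After:
    vsetivli a0, 1, e16,mf4,ta,mu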

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D97332

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 280cd903d60e..6f2b66db26f6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3972,18 +3972,39 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
 
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
-  // VL and VTYPE are alive here.
-  MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI));
+  auto BuildVSETVLI = [&]() {
+    if (VLIndex >= 0) {
+      Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+      Register VLReg = MI.getOperand(VLIndex).getReg();
+
+      // VL might be a compile time constant, but isel would have to put it
+      // in a register. See if VL comes from an ADDI X0, imm.
+      if (VLReg.isVirtual()) {
+        MachineInstr *Def = MRI.getVRegDef(VLReg);
+        if (Def && Def->getOpcode() == RISCV::ADDI &&
+            Def->getOperand(1).getReg() == RISCV::X0 &&
+            Def->getOperand(2).isImm()) {
+          uint64_t Imm = Def->getOperand(2).getImm();
+          // VSETIVLI allows a 5-bit zero extended immediate.
+          if (isUInt<5>(Imm))
+            return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
+                .addReg(DestReg, RegState::Define | RegState::Dead)
+                .addImm(Imm);
+        }
+      }
+
+      return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
+          .addReg(DestReg, RegState::Define | RegState::Dead)
+          .addReg(VLReg);
+    }
 
-  if (VLIndex >= 0) {
-    // Set VL (rs1 != X0).
-    Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
-    MIB.addReg(DestReg, RegState::Define | RegState::Dead)
-        .addReg(MI.getOperand(VLIndex).getReg());
-  } else
     // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0).
-    MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
+    return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
+        .addReg(RISCV::X0, RegState::Define | RegState::Dead)
         .addReg(RISCV::X0, RegState::Kill);
+  };
+
+  MachineInstrBuilder MIB = BuildVSETVLI();
 
   // Default to tail agnostic unless the destination is tied to a source. In
   // that case the user would have some control over the tail values. The tail

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
index 0225bddd1467..c65c07ad8de3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
@@ -15,8 +15,7 @@ define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
 define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv1f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -28,8 +27,7 @@ define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
 define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv1f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -51,8 +49,7 @@ define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
 define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv2f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -64,8 +61,7 @@ define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
 define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -87,8 +83,7 @@ define half @extractelt_nxv4f16_0(<vscale x 4 x half> %v) {
 define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv4f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -100,8 +95,7 @@ define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
 define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -123,8 +117,7 @@ define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
 define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv8f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -136,8 +129,7 @@ define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
 define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -159,8 +151,7 @@ define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
 define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv16f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -172,8 +163,7 @@ define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
 define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -195,8 +185,7 @@ define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
 define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv32f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -208,8 +197,7 @@ define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
 define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv32f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -231,8 +219,7 @@ define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
 define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv1f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -244,8 +231,7 @@ define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
 define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv1f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -267,8 +253,7 @@ define float @extractelt_nxv2f32_0(<vscale x 2 x float> %v) {
 define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv2f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -280,8 +265,7 @@ define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
 define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -303,8 +287,7 @@ define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
 define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv4f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -316,8 +299,7 @@ define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
 define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -339,8 +321,7 @@ define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
 define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv8f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -352,8 +333,7 @@ define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
 define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -375,8 +355,7 @@ define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
 define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv16f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -388,8 +367,7 @@ define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
 define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -411,8 +389,7 @@ define double @extractelt_nxv1f64_0(<vscale x 1 x double> %v) {
 define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv1f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -424,8 +401,7 @@ define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
 define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv1f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -447,8 +423,7 @@ define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
 define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv2f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -460,8 +435,7 @@ define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -483,8 +457,7 @@ define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
 define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv4f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -496,8 +469,7 @@ define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -519,8 +491,7 @@ define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
 define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv8f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -532,8 +503,7 @@ define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
index b8e8bb0e7280..0245626c630b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
@@ -15,8 +15,7 @@ define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
 define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv1f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -28,8 +27,7 @@ define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
 define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv1f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -51,8 +49,7 @@ define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
 define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv2f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -64,8 +61,7 @@ define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
 define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv2f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -87,8 +83,7 @@ define half @extractelt_nxv4f16_0(<vscale x 4 x half> %v) {
 define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv4f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -100,8 +95,7 @@ define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
 define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv4f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -123,8 +117,7 @@ define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
 define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv8f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -136,8 +129,7 @@ define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
 define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv8f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -159,8 +151,7 @@ define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
 define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv16f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -172,8 +163,7 @@ define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
 define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv16f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -195,8 +185,7 @@ define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
 define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
 ; CHECK-LABEL: extractelt_nxv32f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -208,8 +197,7 @@ define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
 define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv32f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -231,8 +219,7 @@ define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
 define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv1f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -244,8 +231,7 @@ define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
 define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv1f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -267,8 +253,7 @@ define float @extractelt_nxv2f32_0(<vscale x 2 x float> %v) {
 define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv2f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -280,8 +265,7 @@ define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
 define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv2f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -303,8 +287,7 @@ define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
 define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv4f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -316,8 +299,7 @@ define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
 define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv4f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -339,8 +321,7 @@ define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
 define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv8f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -352,8 +333,7 @@ define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
 define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv8f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -375,8 +355,7 @@ define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
 define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
 ; CHECK-LABEL: extractelt_nxv16f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -388,8 +367,7 @@ define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
 define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv16f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -411,8 +389,7 @@ define double @extractelt_nxv1f64_0(<vscale x 1 x double> %v) {
 define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv1f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -424,8 +401,7 @@ define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
 define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv1f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
@@ -447,8 +423,7 @@ define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
 define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv2f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -460,8 +435,7 @@ define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv2f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v26
@@ -483,8 +457,7 @@ define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
 define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv4f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -496,8 +469,7 @@ define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv4f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v28
@@ -519,8 +491,7 @@ define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
 define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv8f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -532,8 +503,7 @@ define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv8f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.f.s fa0, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index 94e638d6b6a5..b75c83a1575c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -15,8 +15,7 @@ define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
 define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv1i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -28,8 +27,7 @@ define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
 define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv1i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -51,8 +49,7 @@ define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
 define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv2i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -64,8 +61,7 @@ define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
 define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -87,8 +83,7 @@ define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
 define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv4i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -100,8 +95,7 @@ define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
 define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -123,8 +117,7 @@ define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
 define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv8i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -136,8 +129,7 @@ define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
 define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -159,8 +151,7 @@ define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
 define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv16i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -172,8 +163,7 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -195,8 +185,7 @@ define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
 define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv32i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -208,8 +197,7 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv32i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -231,8 +219,7 @@ define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
 define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv64i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -244,8 +231,7 @@ define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
 define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv64i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -267,8 +253,7 @@ define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
 define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv1i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -280,8 +265,7 @@ define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
 define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv1i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -303,8 +287,7 @@ define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
 define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv2i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -316,8 +299,7 @@ define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
 define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -339,8 +321,7 @@ define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
 define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv4i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -352,8 +333,7 @@ define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
 define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -375,8 +355,7 @@ define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
 define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv8i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -388,8 +367,7 @@ define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
 define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -411,8 +389,7 @@ define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
 define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv16i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -424,8 +401,7 @@ define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
 define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -447,8 +423,7 @@ define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
 define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv32i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -460,8 +435,7 @@ define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
 define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv32i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -483,8 +457,7 @@ define i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
 define i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv1i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -496,8 +469,7 @@ define i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
 define i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv1i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -519,8 +491,7 @@ define i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
 define i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv2i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -532,8 +503,7 @@ define i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
 define i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -555,8 +525,7 @@ define i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
 define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv4i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -568,8 +537,7 @@ define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
 define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -591,8 +559,7 @@ define i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
 define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv8i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -604,8 +571,7 @@ define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
 define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -627,8 +593,7 @@ define i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
 define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv16i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -640,8 +605,7 @@ define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
 define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -653,10 +617,9 @@ define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
 define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v8, a1
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -668,14 +631,13 @@ define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
 define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv1i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -686,14 +648,13 @@ define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
 define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv1i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -704,10 +665,9 @@ define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
 define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
-; CHECK-NEXT:    vsrl.vx v26, v8, a1
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v26
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -719,14 +679,13 @@ define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
 define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv2i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsrl.vx v26, v26, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v26
 ; CHECK-NEXT:    ret
@@ -737,14 +696,13 @@ define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsrl.vx v26, v26, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m2,ta,mu
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v26
 ; CHECK-NEXT:    ret
@@ -755,10 +713,9 @@ define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
 define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
-; CHECK-NEXT:    vsrl.vx v28, v8, a1
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v28
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -770,14 +727,13 @@ define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
 define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv4i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsrl.vx v28, v28, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vx v28, v28, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v28
 ; CHECK-NEXT:    ret
@@ -788,14 +744,13 @@ define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsrl.vx v28, v28, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m4,ta,mu
+; CHECK-NEXT:    vsrl.vx v28, v28, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v28
 ; CHECK-NEXT:    ret
@@ -806,10 +761,9 @@ define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
 define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    addi a1, zero, 32
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
-; CHECK-NEXT:    vsrl.vx v16, v8, a1
+; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vx v16, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v16
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -821,14 +775,13 @@ define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
 define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv8i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -839,14 +792,13 @@ define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a2, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m8,ta,mu
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index 405a39b7ab70..9a34a7e8aec1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -15,8 +15,7 @@ define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
 define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv1i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -28,8 +27,7 @@ define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
 define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv1i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -51,8 +49,7 @@ define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
 define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv2i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -64,8 +61,7 @@ define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
 define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv2i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -87,8 +83,7 @@ define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
 define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv4i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -100,8 +95,7 @@ define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
 define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv4i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -123,8 +117,7 @@ define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
 define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv8i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -136,8 +129,7 @@ define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
 define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv8i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -159,8 +151,7 @@ define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
 define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv16i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -172,8 +163,7 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv16i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -195,8 +185,7 @@ define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
 define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv32i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -208,8 +197,7 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv32i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -231,8 +219,7 @@ define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
 define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
 ; CHECK-LABEL: extractelt_nxv64i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -244,8 +231,7 @@ define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
 define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv64i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -267,8 +253,7 @@ define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
 define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv1i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -280,8 +265,7 @@ define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
 define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv1i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -303,8 +287,7 @@ define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
 define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv2i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -316,8 +299,7 @@ define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
 define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv2i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -339,8 +321,7 @@ define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
 define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv4i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -352,8 +333,7 @@ define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
 define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv4i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -375,8 +355,7 @@ define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
 define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv8i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -388,8 +367,7 @@ define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
 define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv8i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -411,8 +389,7 @@ define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
 define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv16i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -424,8 +401,7 @@ define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
 define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv16i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -447,8 +423,7 @@ define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
 define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
 ; CHECK-LABEL: extractelt_nxv32i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -460,8 +435,7 @@ define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
 define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv32i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -483,8 +457,7 @@ define signext i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
 define signext i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv1i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -496,8 +469,7 @@ define signext i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
 define signext i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv1i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -519,8 +491,7 @@ define signext i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
 define signext i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv2i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -532,8 +503,7 @@ define signext i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
 define signext i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv2i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -555,8 +525,7 @@ define signext i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
 define signext i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv4i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -568,8 +537,7 @@ define signext i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
 define signext i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv4i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -591,8 +559,7 @@ define signext i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
 define signext i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv8i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -604,8 +571,7 @@ define signext i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
 define signext i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv8i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -627,8 +593,7 @@ define signext i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
 define signext i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv16i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -640,8 +605,7 @@ define signext i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
 define signext i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv16i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -663,8 +627,7 @@ define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
 define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv1i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -676,8 +639,7 @@ define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
 define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv1i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v25, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
@@ -699,8 +661,7 @@ define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
 define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv2i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v26, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -712,8 +673,7 @@ define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv2i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v26, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v26
@@ -735,8 +695,7 @@ define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
 define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv4i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v28, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -748,8 +707,7 @@ define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv4i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v28, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v28
@@ -771,8 +729,7 @@ define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
 define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv8i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
-; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8
@@ -784,8 +741,7 @@ define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 signext %idx) {
 ; CHECK-LABEL: extractelt_nxv8i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index 3e8539a5a4ae..ba325abb32dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -5,11 +5,9 @@
 define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind {
 ; RV32-LABEL: extractelt_v16i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 16
-; RV32-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; RV32-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; RV32-NEXT:    vle8.v v25, (a0)
-; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; RV32-NEXT:    vslidedown.vi v25, v25, 7
 ; RV32-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
@@ -17,11 +15,9 @@ define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v16i8:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 16
-; RV64-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; RV64-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; RV64-NEXT:    vle8.v v25, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
 ; RV64-NEXT:    vslidedown.vi v25, v25, 7
 ; RV64-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
@@ -34,11 +30,9 @@ define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind {
 define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind {
 ; RV32-LABEL: extractelt_v8i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 8
-; RV32-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; RV32-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; RV32-NEXT:    vle16.v v25, (a0)
-; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; RV32-NEXT:    vslidedown.vi v25, v25, 7
 ; RV32-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
@@ -46,11 +40,9 @@ define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v8i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 8
-; RV64-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; RV64-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; RV64-NEXT:    vle16.v v25, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e16,m1,ta,mu
 ; RV64-NEXT:    vslidedown.vi v25, v25, 7
 ; RV64-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
@@ -63,11 +55,9 @@ define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind {
 define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind {
 ; RV32-LABEL: extractelt_v4i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 4
-; RV32-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; RV32-NEXT:    vle32.v v25, (a0)
-; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; RV32-NEXT:    vslidedown.vi v25, v25, 2
 ; RV32-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
@@ -75,11 +65,9 @@ define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v4i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 4
-; RV64-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; RV64-NEXT:    vle32.v v25, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
 ; RV64-NEXT:    vslidedown.vi v25, v25, 2
 ; RV64-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
@@ -92,23 +80,20 @@ define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind {
 define i64 @extractelt_v2i64(<2 x i64>* %x) nounwind {
 ; RV32-LABEL: extractelt_v2i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 2
-; RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; RV32-NEXT:    vle64.v v25, (a0)
 ; RV32-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v25
-; RV32-NEXT:    addi a1, zero, 1
-; RV32-NEXT:    addi a2, zero, 32
-; RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; RV32-NEXT:    vsrl.vx v25, v25, a2
+; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; RV32-NEXT:    vsrl.vx v25, v25, a1
 ; RV32-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v25
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: extractelt_v2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 2
-; RV64-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; RV64-NEXT:    vle64.v v25, (a0)
 ; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v25
@@ -124,8 +109,7 @@ define i8 @extractelt_v32i8(<32 x i8>* %x) nounwind {
 ; RV32-NEXT:    addi a1, zero, 32
 ; RV32-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; RV32-NEXT:    vle8.v v26, (a0)
-; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 7
 ; RV32-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
@@ -136,8 +120,7 @@ define i8 @extractelt_v32i8(<32 x i8>* %x) nounwind {
 ; RV64-NEXT:    addi a1, zero, 32
 ; RV64-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; RV64-NEXT:    vle8.v v26, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 7
 ; RV64-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
@@ -150,11 +133,9 @@ define i8 @extractelt_v32i8(<32 x i8>* %x) nounwind {
 define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind {
 ; RV32-LABEL: extractelt_v16i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 16
-; RV32-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; RV32-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; RV32-NEXT:    vle16.v v26, (a0)
-; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 7
 ; RV32-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
@@ -162,11 +143,9 @@ define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v16i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 16
-; RV64-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; RV64-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; RV64-NEXT:    vle16.v v26, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 7
 ; RV64-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
@@ -179,11 +158,9 @@ define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind {
 define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind {
 ; RV32-LABEL: extractelt_v8i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 8
-; RV32-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT:    vle32.v v26, (a0)
-; RV32-NEXT:    addi a0, zero, 1
-; RV32-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 6
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
@@ -191,11 +168,9 @@ define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v8i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 8
-; RV64-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; RV64-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV64-NEXT:    vle32.v v26, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 6
 ; RV64-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
@@ -208,28 +183,24 @@ define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind {
 define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
 ; RV32-LABEL: extractelt_v4i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 4
-; RV32-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; RV32-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; RV32-NEXT:    vle64.v v26, (a0)
-; RV32-NEXT:    addi a1, zero, 1
-; RV32-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 3
 ; RV32-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v26
-; RV32-NEXT:    addi a2, zero, 32
-; RV32-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; RV32-NEXT:    vsrl.vx v26, v26, a2
+; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    vsetivli a2, 1, e64,m2,ta,mu
+; RV32-NEXT:    vsrl.vx v26, v26, a1
 ; RV32-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v26
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: extractelt_v4i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 4
-; RV64-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; RV64-NEXT:    vle64.v v26, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 3
 ; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26
@@ -246,15 +217,13 @@ define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
 define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
 ; RV32-LABEL: extractelt_v3i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 8
-; RV32-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT:    vle32.v v26, (a0)
-; RV32-NEXT:    addi a1, zero, 1
-; RV32-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; RV32-NEXT:    vsetivli a0, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 4
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a0, v28
-; RV32-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 5
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT:    vmv.x.s a1, v26
@@ -262,11 +231,9 @@ define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
 ;
 ; RV64-LABEL: extractelt_v3i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 4
-; RV64-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; RV64-NEXT:    vle64.v v26, (a0)
-; RV64-NEXT:    addi a0, zero, 1
-; RV64-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a0, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 2
 ; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a0, v26

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index ca8229bf0da6..4844ee024b16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -12,8 +12,7 @@ define void @buildvec_no_vid_v4f32(<4 x float>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI0_0)
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
index 3eb6cb85f916..8c880c236bb7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -5,12 +5,11 @@
 define void @fcmp_oeq_vv_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oeq_vv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmfeq.vv v27, v25, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v27, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -23,12 +22,11 @@ define void @fcmp_oeq_vv_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
 define void @fcmp_oeq_vv_v8f16_nonans(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oeq_vv_v8f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmfeq.vv v27, v25, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v27, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -41,12 +39,11 @@ define void @fcmp_oeq_vv_v8f16_nonans(<8 x half>* %x, <8 x half>* %y, <8 x i1>*
 define void @fcmp_une_vv_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_une_vv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 4
-; CHECK-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmfne.vv v27, v25, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v27, (a2)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -59,12 +56,11 @@ define void @fcmp_une_vv_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) {
 define void @fcmp_une_vv_v4f32_nonans(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_une_vv_v4f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 4
-; CHECK-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmfne.vv v27, v25, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v27, (a2)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -77,12 +73,11 @@ define void @fcmp_une_vv_v4f32_nonans(<4 x float>* %x, <4 x float>* %y, <4 x i1>
 define void @fcmp_ogt_vv_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ogt_vv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 2
-; CHECK-NEXT:    vsetvli a4, a3, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vmflt.vv v27, v26, v25
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v27, (a2)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -95,12 +90,11 @@ define void @fcmp_ogt_vv_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z)
 define void @fcmp_ogt_vv_v2f64_nonans(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ogt_vv_v2f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 2
-; CHECK-NEXT:    vsetvli a4, a3, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vmflt.vv v27, v26, v25
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v27, (a2)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -113,12 +107,11 @@ define void @fcmp_ogt_vv_v2f64_nonans(<2 x double>* %x, <2 x double>* %y, <2 x i
 define void @fcmp_olt_vv_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_olt_vv_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a4, a3, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v26, v28
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -131,12 +124,11 @@ define void @fcmp_olt_vv_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z)
 define void @fcmp_olt_vv_v16f16_nonans(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_olt_vv_v16f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a4, a3, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v26, v28
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -149,12 +141,11 @@ define void @fcmp_olt_vv_v16f16_nonans(<16 x half>* %x, <16 x half>* %y, <16 x i
 define void @fcmp_oge_vv_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oge_vv_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v28, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -167,12 +158,11 @@ define void @fcmp_oge_vv_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x i1>* %z) {
 define void @fcmp_oge_vv_v8f32_nonans(<8 x float>* %x, <8 x float>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oge_vv_v8f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v28, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -185,12 +175,11 @@ define void @fcmp_oge_vv_v8f32_nonans(<8 x float>* %x, <8 x float>* %y, <8 x i1>
 define void @fcmp_ole_vv_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ole_vv_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 4
-; CHECK-NEXT:    vsetvli a4, a3, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 4, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a0)
 ; CHECK-NEXT:    vle64.v v28, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v26, v28
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -203,12 +192,11 @@ define void @fcmp_ole_vv_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z)
 define void @fcmp_ole_vv_v4f64_nonans(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ole_vv_v4f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 4
-; CHECK-NEXT:    vsetvli a4, a3, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 4, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a0)
 ; CHECK-NEXT:    vle64.v v28, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v26, v28
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -258,12 +246,11 @@ define void @fcmp_ule_vv_v32f16_nonans(<32 x half>* %x, <32 x half>* %y, <32 x i
 define void @fcmp_uge_vv_v16f32(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uge_vv_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a4, a3, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v28, v8
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -277,12 +264,11 @@ define void @fcmp_uge_vv_v16f32(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %
 define void @fcmp_uge_vv_v16f32_nonans(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uge_vv_v16f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a4, a3, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v8, v28
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -295,14 +281,13 @@ define void @fcmp_uge_vv_v16f32_nonans(<16 x float>* %x, <16 x float>* %y, <16 x
 define void @fcmp_ult_vv_v8f64(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ult_vv_v8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    vmfle.vv v25, v8, v28
-; CHECK-NEXT:    vsetvli a0, a3, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -315,12 +300,11 @@ define void @fcmp_ult_vv_v8f64(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z)
 define void @fcmp_ult_vv_v8f64_nonans(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ult_vv_v8f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v28, v8
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -408,13 +392,12 @@ define void @fcmp_ueq_vv_v32f32_nonans(<32 x float>* %x, <32 x float>* %y, <32 x
 define void @fcmp_one_vv_v8f64(<16 x double>* %x, <16 x double>* %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_one_vv_v8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a4, a3, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vle64.v v16, (a1)
 ; CHECK-NEXT:    vmflt.vv v25, v16, v8
 ; CHECK-NEXT:    vmflt.vv v26, v8, v16
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
@@ -428,12 +411,11 @@ define void @fcmp_one_vv_v8f64(<16 x double>* %x, <16 x double>* %y, <16 x i1>*
 define void @fcmp_one_vv_v8f64_nonans(<16 x double>* %x, <16 x double>* %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_one_vv_v8f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a4, a3, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vle64.v v16, (a1)
 ; CHECK-NEXT:    vmfne.vv v25, v8, v16
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -446,15 +428,14 @@ define void @fcmp_one_vv_v8f64_nonans(<16 x double>* %x, <16 x double>* %y, <16
 define void @fcmp_ord_vv_v4f16(<4 x half>* %x, <4 x half>* %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ord_vv_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 4
-; CHECK-NEXT:    vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 4, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfeq.vv v27, v25, v25
 ; CHECK-NEXT:    vmfeq.vv v25, v26, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmand.mm v25, v25, v27
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, <4 x half>* %x
@@ -467,15 +448,14 @@ define void @fcmp_ord_vv_v4f16(<4 x half>* %x, <4 x half>* %y, <4 x i1>* %z) {
 define void @fcmp_uno_vv_v4f16(<2 x half>* %x, <2 x half>* %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uno_vv_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 2
-; CHECK-NEXT:    vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 2, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfne.vv v27, v25, v25
 ; CHECK-NEXT:    vmfne.vv v25, v26, v26
-; CHECK-NEXT:    vsetvli a0, a3, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v25, v27
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <2 x half>, <2 x half>* %x
@@ -488,11 +468,10 @@ define void @fcmp_uno_vv_v4f16(<2 x half>* %x, <2 x half>* %y, <2 x i1>* %z) {
 define void @fcmp_oeq_vf_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oeq_vf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -506,11 +485,10 @@ define void @fcmp_oeq_vf_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 define void @fcmp_oeq_vf_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oeq_vf_v8f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -524,11 +502,10 @@ define void @fcmp_oeq_vf_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 define void @fcmp_une_vf_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_une_vf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vmfne.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -542,11 +519,10 @@ define void @fcmp_une_vf_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 define void @fcmp_une_vf_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_une_vf_v4f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vmfne.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -560,11 +536,10 @@ define void @fcmp_une_vf_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 define void @fcmp_ogt_vf_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ogt_vf_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vmfgt.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -578,11 +553,10 @@ define void @fcmp_ogt_vf_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 define void @fcmp_ogt_vf_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ogt_vf_v2f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vmfgt.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -596,11 +570,10 @@ define void @fcmp_ogt_vf_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z)
 define void @fcmp_olt_vf_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_olt_vf_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -614,11 +587,10 @@ define void @fcmp_olt_vf_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 define void @fcmp_olt_vf_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_olt_vf_v16f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -632,11 +604,10 @@ define void @fcmp_olt_vf_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z)
 define void @fcmp_oge_vf_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oge_vf_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -650,11 +621,10 @@ define void @fcmp_oge_vf_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 define void @fcmp_oge_vf_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oge_vf_v8f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -668,11 +638,10 @@ define void @fcmp_oge_vf_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 define void @fcmp_ole_vf_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ole_vf_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -686,11 +655,10 @@ define void @fcmp_ole_vf_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 define void @fcmp_ole_vf_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ole_vf_v4f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -741,11 +709,10 @@ define void @fcmp_ule_vf_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z)
 define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uge_vf_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -760,11 +727,10 @@ define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 define void @fcmp_uge_vf_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uge_vf_v16f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -778,13 +744,12 @@ define void @fcmp_uge_vf_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z
 define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ult_vf_v8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -798,11 +763,10 @@ define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 define void @fcmp_ult_vf_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ult_vf_v8f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -891,12 +855,11 @@ define void @fcmp_ueq_vf_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z
 define void @fcmp_one_vf_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_one_vf_v8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmflt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -911,11 +874,10 @@ define void @fcmp_one_vf_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 define void @fcmp_one_vf_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_one_vf_v8f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfne.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -929,15 +891,14 @@ define void @fcmp_one_vf_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %
 define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ord_vf_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v27, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v25, v25
-; CHECK-NEXT:    vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmand.mm v25, v26, v27
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, <4 x half>* %x
@@ -951,15 +912,14 @@ define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uno_vf_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v27, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v25, v25
-; CHECK-NEXT:    vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v26, v27
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x half>, <2 x half>* %x
@@ -973,11 +933,10 @@ define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 define void @fcmp_oeq_fv_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oeq_fv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -991,11 +950,10 @@ define void @fcmp_oeq_fv_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 define void @fcmp_oeq_fv_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oeq_fv_v8f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
@@ -1009,11 +967,10 @@ define void @fcmp_oeq_fv_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 define void @fcmp_une_fv_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_une_fv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vmfne.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -1027,11 +984,10 @@ define void @fcmp_une_fv_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
 define void @fcmp_une_fv_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_une_fv_v4f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vmfne.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
@@ -1045,11 +1001,10 @@ define void @fcmp_une_fv_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
 define void @fcmp_ogt_fv_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ogt_fv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vmflt.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -1063,11 +1018,10 @@ define void @fcmp_ogt_fv_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
 define void @fcmp_ogt_fv_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ogt_fv_v2f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vmflt.vf v26, v25, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v26, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
@@ -1081,11 +1035,10 @@ define void @fcmp_ogt_fv_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z)
 define void @fcmp_olt_fv_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_olt_fv_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -1099,11 +1052,10 @@ define void @fcmp_olt_fv_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
 define void @fcmp_olt_fv_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_olt_fv_v16f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, <16 x half>* %x
@@ -1117,11 +1069,10 @@ define void @fcmp_olt_fv_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z)
 define void @fcmp_oge_fv_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oge_fv_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -1135,11 +1086,10 @@ define void @fcmp_oge_fv_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
 define void @fcmp_oge_fv_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_oge_fv_v8f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -1153,11 +1103,10 @@ define void @fcmp_oge_fv_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
 define void @fcmp_ole_fv_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ole_fv_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -1171,11 +1120,10 @@ define void @fcmp_ole_fv_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
 define void @fcmp_ole_fv_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ole_fv_v4f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a0)
 ; CHECK-NEXT:    vmfge.vf v25, v26, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, <4 x double>* %x
@@ -1226,11 +1174,10 @@ define void @fcmp_ule_fv_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z)
 define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uge_fv_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1245,11 +1192,10 @@ define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
 define void @fcmp_uge_fv_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uge_fv_v16f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -1263,13 +1209,12 @@ define void @fcmp_uge_fv_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z
 define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ult_fv_v8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmfle.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v25, v25, v25
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -1283,11 +1228,10 @@ define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
 define void @fcmp_ult_fv_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ult_fv_v8f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a0)
 ; CHECK-NEXT:    vmfgt.vf v25, v28, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -1376,12 +1320,11 @@ define void @fcmp_ueq_fv_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z
 define void @fcmp_one_fv_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_one_fv_v8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v25, v8, fa0
 ; CHECK-NEXT:    vmfgt.vf v26, v8, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v26, v25
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -1396,11 +1339,10 @@ define void @fcmp_one_fv_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
 define void @fcmp_one_fv_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %z) {
 ; CHECK-LABEL: fcmp_one_fv_v8f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfne.vf v25, v8, fa0
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
@@ -1414,15 +1356,14 @@ define void @fcmp_one_fv_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %
 define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ord_fv_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfeq.vf v27, v26, fa0
 ; CHECK-NEXT:    vmfeq.vv v26, v25, v25
-; CHECK-NEXT:    vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmand.mm v25, v27, v26
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, <4 x half>* %x
@@ -1436,15 +1377,14 @@ define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 define void @fcmp_uno_fv_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-LABEL: fcmp_uno_fv_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
 ; CHECK-NEXT:    vmfne.vf v27, v26, fa0
 ; CHECK-NEXT:    vmfne.vv v26, v25, v25
-; CHECK-NEXT:    vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmor.mm v25, v27, v26
-; CHECK-NEXT:    vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x half>, <2 x half>* %x

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
index aa76874c2220..6e25c28ea4f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -7,8 +7,7 @@
 define void @splat_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: splat_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -21,8 +20,7 @@ define void @splat_v8f16(<8 x half>* %x, half %y) {
 define void @splat_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: splat_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -35,8 +33,7 @@ define void @splat_v4f32(<4 x float>* %x, float %y) {
 define void @splat_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: splat_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -49,16 +46,14 @@ define void @splat_v2f64(<2 x double>* %x, double %y) {
 define void @splat_16f16(<16 x half>* %x, half %y) {
 ; LMULMAX2-LABEL: splat_16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vfmv.v.f v26, fa0
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_16f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 8
-; LMULMAX1-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vfmv.v.f v25, fa0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -73,16 +68,14 @@ define void @splat_16f16(<16 x half>* %x, half %y) {
 define void @splat_v8f32(<8 x float>* %x, float %y) {
 ; LMULMAX2-LABEL: splat_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vfmv.v.f v26, fa0
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_v8f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vfmv.v.f v25, fa0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -97,16 +90,14 @@ define void @splat_v8f32(<8 x float>* %x, float %y) {
 define void @splat_v4f64(<4 x double>* %x, double %y) {
 ; LMULMAX2-LABEL: splat_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 4
-; LMULMAX2-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vfmv.v.f v26, fa0
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_v4f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 2
-; LMULMAX1-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vfmv.v.f v25, fa0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse64.v v25, (a1)
@@ -121,8 +112,7 @@ define void @splat_v4f64(<4 x double>* %x, double %y) {
 define void @splat_zero_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: splat_zero_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -135,8 +125,7 @@ define void @splat_zero_v8f16(<8 x half>* %x) {
 define void @splat_zero_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: splat_zero_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -149,8 +138,7 @@ define void @splat_zero_v4f32(<4 x float>* %x) {
 define void @splat_zero_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: splat_zero_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -163,16 +151,14 @@ define void @splat_zero_v2f64(<2 x double>* %x) {
 define void @splat_zero_16f16(<16 x half>* %x) {
 ; LMULMAX2-LABEL: splat_zero_16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_16f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 8
-; LMULMAX1-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -187,16 +173,14 @@ define void @splat_zero_16f16(<16 x half>* %x) {
 define void @splat_zero_v8f32(<8 x float>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v8f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -211,16 +195,14 @@ define void @splat_zero_v8f32(<8 x float>* %x) {
 define void @splat_zero_v4f64(<4 x double>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 4
-; LMULMAX2-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v4f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 2
-; LMULMAX1-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse64.v v25, (a1)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
index 6da054afc73c..c8f0dd8df210 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -7,8 +7,7 @@
 define void @gather_const_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: gather_const_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vrgather.vi v26, v25, 5
 ; CHECK-NEXT:    vse16.v v26, (a0)
@@ -24,8 +23,7 @@ define void @gather_const_v8f16(<8 x half>* %x) {
 define void @gather_const_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: gather_const_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vrgather.vi v26, v25, 2
 ; CHECK-NEXT:    vse32.v v26, (a0)
@@ -41,8 +39,7 @@ define void @gather_const_v4f32(<4 x float>* %x) {
 define void @gather_const_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: gather_const_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vrgather.vi v26, v25, 0
 ; CHECK-NEXT:    vse64.v v26, (a0)
@@ -69,8 +66,7 @@ define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX1-LABEL: gather_const_v64f16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 80
-; LMULMAX1-NEXT:    addi a2, zero, 8
-; LMULMAX1-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
@@ -109,8 +105,7 @@ define void @gather_const_v32f32(<32 x float>* %x) {
 ; LMULMAX1-LABEL: gather_const_v32f32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 64
-; LMULMAX1-NEXT:    addi a2, zero, 4
-; LMULMAX1-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
@@ -139,8 +134,7 @@ define void @gather_const_v32f32(<32 x float>* %x) {
 define void @gather_const_v16f64(<16 x double>* %x) {
 ; LMULMAX8-LABEL: gather_const_v16f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 16
-; LMULMAX8-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; LMULMAX8-NEXT:    vsetivli a1, 16, e64,m8,ta,mu
 ; LMULMAX8-NEXT:    vle64.v v8, (a0)
 ; LMULMAX8-NEXT:    vrgather.vi v16, v8, 10
 ; LMULMAX8-NEXT:    vse64.v v16, (a0)
@@ -149,8 +143,7 @@ define void @gather_const_v16f64(<16 x double>* %x) {
 ; LMULMAX1-LABEL: gather_const_v16f64:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 80
-; LMULMAX1-NEXT:    addi a2, zero, 2
-; LMULMAX1-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vle64.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index be8d54f287b7..99c33f879278 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -7,8 +7,7 @@
 define void @fadd_v8f16(<8 x half>* %x, <8 x half>* %y) {
 ; CHECK-LABEL: fadd_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vfadd.vv v25, v25, v26
@@ -24,8 +23,7 @@ define void @fadd_v8f16(<8 x half>* %x, <8 x half>* %y) {
 define void @fadd_v4f32(<4 x float>* %x, <4 x float>* %y) {
 ; CHECK-LABEL: fadd_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfadd.vv v25, v25, v26
@@ -41,8 +39,7 @@ define void @fadd_v4f32(<4 x float>* %x, <4 x float>* %y) {
 define void @fadd_v2f64(<2 x double>* %x, <2 x double>* %y) {
 ; CHECK-LABEL: fadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfadd.vv v25, v25, v26
@@ -58,8 +55,7 @@ define void @fadd_v2f64(<2 x double>* %x, <2 x double>* %y) {
 define void @fsub_v8f16(<8 x half>* %x, <8 x half>* %y) {
 ; CHECK-LABEL: fsub_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vfsub.vv v25, v25, v26
@@ -75,8 +71,7 @@ define void @fsub_v8f16(<8 x half>* %x, <8 x half>* %y) {
 define void @fsub_v4f32(<4 x float>* %x, <4 x float>* %y) {
 ; CHECK-LABEL: fsub_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfsub.vv v25, v25, v26
@@ -92,8 +87,7 @@ define void @fsub_v4f32(<4 x float>* %x, <4 x float>* %y) {
 define void @fsub_v2f64(<2 x double>* %x, <2 x double>* %y) {
 ; CHECK-LABEL: fsub_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfsub.vv v25, v25, v26
@@ -109,8 +103,7 @@ define void @fsub_v2f64(<2 x double>* %x, <2 x double>* %y) {
 define void @fmul_v8f16(<8 x half>* %x, <8 x half>* %y) {
 ; CHECK-LABEL: fmul_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vfmul.vv v25, v25, v26
@@ -126,8 +119,7 @@ define void @fmul_v8f16(<8 x half>* %x, <8 x half>* %y) {
 define void @fmul_v4f32(<4 x float>* %x, <4 x float>* %y) {
 ; CHECK-LABEL: fmul_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfmul.vv v25, v25, v26
@@ -143,8 +135,7 @@ define void @fmul_v4f32(<4 x float>* %x, <4 x float>* %y) {
 define void @fmul_v2f64(<2 x double>* %x, <2 x double>* %y) {
 ; CHECK-LABEL: fmul_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfmul.vv v25, v25, v26
@@ -160,8 +151,7 @@ define void @fmul_v2f64(<2 x double>* %x, <2 x double>* %y) {
 define void @fdiv_v8f16(<8 x half>* %x, <8 x half>* %y) {
 ; CHECK-LABEL: fdiv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vfdiv.vv v25, v25, v26
@@ -177,8 +167,7 @@ define void @fdiv_v8f16(<8 x half>* %x, <8 x half>* %y) {
 define void @fdiv_v4f32(<4 x float>* %x, <4 x float>* %y) {
 ; CHECK-LABEL: fdiv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfdiv.vv v25, v25, v26
@@ -194,8 +183,7 @@ define void @fdiv_v4f32(<4 x float>* %x, <4 x float>* %y) {
 define void @fdiv_v2f64(<2 x double>* %x, <2 x double>* %y) {
 ; CHECK-LABEL: fdiv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfdiv.vv v25, v25, v26
@@ -211,8 +199,7 @@ define void @fdiv_v2f64(<2 x double>* %x, <2 x double>* %y) {
 define void @fneg_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: fneg_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfsgnjn.vv v25, v25, v25
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -226,8 +213,7 @@ define void @fneg_v8f16(<8 x half>* %x) {
 define void @fneg_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: fneg_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfsgnjn.vv v25, v25, v25
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -241,8 +227,7 @@ define void @fneg_v4f32(<4 x float>* %x) {
 define void @fneg_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: fneg_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfsgnjn.vv v25, v25, v25
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -256,8 +241,7 @@ define void @fneg_v2f64(<2 x double>* %x) {
 define void @fabs_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: fabs_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfsgnjx.vv v25, v25, v25
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -272,8 +256,7 @@ declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
 define void @fabs_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: fabs_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfsgnjx.vv v25, v25, v25
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -288,8 +271,7 @@ declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
 define void @fabs_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: fabs_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfsgnjx.vv v25, v25, v25
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -304,8 +286,7 @@ declare <2 x double> @llvm.fabs.v2f64(<2 x double>)
 define void @sqrt_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: sqrt_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfsqrt.v v25, v25
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -320,8 +301,7 @@ declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
 define void @sqrt_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: sqrt_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfsqrt.v v25, v25
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -336,8 +316,7 @@ declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
 define void @sqrt_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: sqrt_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfsqrt.v v25, v25
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -352,8 +331,7 @@ declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
 define void @fma_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) {
 ; CHECK-LABEL: fma_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vle16.v v27, (a2)
@@ -372,8 +350,7 @@ declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
 define void @fma_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) {
 ; CHECK-LABEL: fma_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 4
-; CHECK-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vle32.v v27, (a2)
@@ -392,8 +369,7 @@ declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
 define void @fma_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) {
 ; CHECK-LABEL: fma_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 2
-; CHECK-NEXT:    vsetvli a3, a3, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vle64.v v27, (a2)
@@ -412,8 +388,7 @@ declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
 define void @fmsub_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) {
 ; CHECK-LABEL: fmsub_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vle16.v v27, (a2)
@@ -432,8 +407,7 @@ define void @fmsub_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) {
 define void @fnmsub_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) {
 ; CHECK-LABEL: fnmsub_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 4
-; CHECK-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vle32.v v27, (a2)
@@ -452,8 +426,7 @@ define void @fnmsub_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) {
 define void @fnmadd_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) {
 ; CHECK-LABEL: fnmadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 2
-; CHECK-NEXT:    vsetvli a3, a3, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vle64.v v27, (a2)
@@ -473,8 +446,7 @@ define void @fnmadd_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z)
 define void @fadd_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ; LMULMAX2-LABEL: fadd_v16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vfadd.vv v26, v26, v28
@@ -483,8 +455,7 @@ define void @fadd_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fadd_v16f16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -499,8 +470,7 @@ define void @fadd_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fadd_v16f16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -522,8 +492,7 @@ define void @fadd_v16f16(<16 x half>* %x, <16 x half>* %y) {
 define void @fadd_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ; LMULMAX2-LABEL: fadd_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vfadd.vv v26, v26, v28
@@ -532,8 +501,7 @@ define void @fadd_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fadd_v8f32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -548,8 +516,7 @@ define void @fadd_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fadd_v8f32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -571,8 +538,7 @@ define void @fadd_v8f32(<8 x float>* %x, <8 x float>* %y) {
 define void @fadd_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ; LMULMAX2-LABEL: fadd_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vfadd.vv v26, v26, v28
@@ -581,8 +547,7 @@ define void @fadd_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fadd_v4f64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -597,8 +562,7 @@ define void @fadd_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fadd_v4f64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -620,8 +584,7 @@ define void @fadd_v4f64(<4 x double>* %x, <4 x double>* %y) {
 define void @fsub_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ; LMULMAX2-LABEL: fsub_v16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vfsub.vv v26, v26, v28
@@ -630,8 +593,7 @@ define void @fsub_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fsub_v16f16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -646,8 +608,7 @@ define void @fsub_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fsub_v16f16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -669,8 +630,7 @@ define void @fsub_v16f16(<16 x half>* %x, <16 x half>* %y) {
 define void @fsub_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ; LMULMAX2-LABEL: fsub_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vfsub.vv v26, v26, v28
@@ -679,8 +639,7 @@ define void @fsub_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fsub_v8f32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -695,8 +654,7 @@ define void @fsub_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fsub_v8f32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -718,8 +676,7 @@ define void @fsub_v8f32(<8 x float>* %x, <8 x float>* %y) {
 define void @fsub_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ; LMULMAX2-LABEL: fsub_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vfsub.vv v26, v26, v28
@@ -728,8 +685,7 @@ define void @fsub_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fsub_v4f64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -744,8 +700,7 @@ define void @fsub_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fsub_v4f64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -767,8 +722,7 @@ define void @fsub_v4f64(<4 x double>* %x, <4 x double>* %y) {
 define void @fmul_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ; LMULMAX2-LABEL: fmul_v16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vfmul.vv v26, v26, v28
@@ -777,8 +731,7 @@ define void @fmul_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fmul_v16f16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -793,8 +746,7 @@ define void @fmul_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fmul_v16f16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -816,8 +768,7 @@ define void @fmul_v16f16(<16 x half>* %x, <16 x half>* %y) {
 define void @fmul_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ; LMULMAX2-LABEL: fmul_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vfmul.vv v26, v26, v28
@@ -826,8 +777,7 @@ define void @fmul_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fmul_v8f32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -842,8 +792,7 @@ define void @fmul_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fmul_v8f32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -865,8 +814,7 @@ define void @fmul_v8f32(<8 x float>* %x, <8 x float>* %y) {
 define void @fmul_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ; LMULMAX2-LABEL: fmul_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vfmul.vv v26, v26, v28
@@ -875,8 +823,7 @@ define void @fmul_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fmul_v4f64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -891,8 +838,7 @@ define void @fmul_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fmul_v4f64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -914,8 +860,7 @@ define void @fmul_v4f64(<4 x double>* %x, <4 x double>* %y) {
 define void @fdiv_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ; LMULMAX2-LABEL: fdiv_v16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vfdiv.vv v26, v26, v28
@@ -924,8 +869,7 @@ define void @fdiv_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fdiv_v16f16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -940,8 +884,7 @@ define void @fdiv_v16f16(<16 x half>* %x, <16 x half>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fdiv_v16f16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -963,8 +906,7 @@ define void @fdiv_v16f16(<16 x half>* %x, <16 x half>* %y) {
 define void @fdiv_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ; LMULMAX2-LABEL: fdiv_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vfdiv.vv v26, v26, v28
@@ -973,8 +915,7 @@ define void @fdiv_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fdiv_v8f32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -989,8 +930,7 @@ define void @fdiv_v8f32(<8 x float>* %x, <8 x float>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fdiv_v8f32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -1012,8 +952,7 @@ define void @fdiv_v8f32(<8 x float>* %x, <8 x float>* %y) {
 define void @fdiv_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ; LMULMAX2-LABEL: fdiv_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vfdiv.vv v26, v26, v28
@@ -1022,8 +961,7 @@ define void @fdiv_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: fdiv_v4f64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -1038,8 +976,7 @@ define void @fdiv_v4f64(<4 x double>* %x, <4 x double>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: fdiv_v4f64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -1061,8 +998,7 @@ define void @fdiv_v4f64(<4 x double>* %x, <4 x double>* %y) {
 define void @fneg_v16f16(<16 x half>* %x) {
 ; LMULMAX2-LABEL: fneg_v16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vfsgnjn.vv v26, v26, v26
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
@@ -1070,8 +1006,7 @@ define void @fneg_v16f16(<16 x half>* %x) {
 ;
 ; LMULMAX1-LABEL: fneg_v16f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 8
-; LMULMAX1-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-NEXT:    vle16.v v26, (a0)
@@ -1089,8 +1024,7 @@ define void @fneg_v16f16(<16 x half>* %x) {
 define void @fneg_v8f32(<8 x float>* %x) {
 ; LMULMAX2-LABEL: fneg_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vfsgnjn.vv v26, v26, v26
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
@@ -1098,8 +1032,7 @@ define void @fneg_v8f32(<8 x float>* %x) {
 ;
 ; LMULMAX1-LABEL: fneg_v8f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-NEXT:    vle32.v v26, (a0)
@@ -1117,8 +1050,7 @@ define void @fneg_v8f32(<8 x float>* %x) {
 define void @fneg_v4f64(<4 x double>* %x) {
 ; LMULMAX2-LABEL: fneg_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 4
-; LMULMAX2-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vfsgnjn.vv v26, v26, v26
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
@@ -1126,8 +1058,7 @@ define void @fneg_v4f64(<4 x double>* %x) {
 ;
 ; LMULMAX1-LABEL: fneg_v4f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 2
-; LMULMAX1-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v25, (a1)
 ; LMULMAX1-NEXT:    vle64.v v26, (a0)
@@ -1145,8 +1076,7 @@ define void @fneg_v4f64(<4 x double>* %x) {
 define void @fma_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x half>* %z) {
 ; LMULMAX2-LABEL: fma_v16f16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a3, zero, 16
-; LMULMAX2-NEXT:    vsetvli a3, a3, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a3, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vle16.v v30, (a2)
@@ -1156,8 +1086,7 @@ define void @fma_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x half>* %z) {
 ;
 ; LMULMAX1-LABEL: fma_v16f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a3, zero, 8
-; LMULMAX1-NEXT:    vsetvli a3, a3, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a3, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-NEXT:    addi a3, a0, 16
 ; LMULMAX1-NEXT:    vle16.v v26, (a3)
@@ -1184,8 +1113,7 @@ declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>)
 define void @fma_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x float>* %z) {
 ; LMULMAX2-LABEL: fma_v8f32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a3, zero, 8
-; LMULMAX2-NEXT:    vsetvli a3, a3, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a3, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vle32.v v30, (a2)
@@ -1195,8 +1123,7 @@ define void @fma_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x float>* %z) {
 ;
 ; LMULMAX1-LABEL: fma_v8f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a3, zero, 4
-; LMULMAX1-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-NEXT:    addi a3, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v26, (a3)
@@ -1223,8 +1150,7 @@ declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
 define void @fma_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x double>* %z) {
 ; LMULMAX2-LABEL: fma_v4f64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a3, zero, 4
-; LMULMAX2-NEXT:    vsetvli a3, a3, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a3, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vle64.v v30, (a2)
@@ -1234,8 +1160,7 @@ define void @fma_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x double>* %z) {
 ;
 ; LMULMAX1-LABEL: fma_v4f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a3, zero, 2
-; LMULMAX1-NEXT:    vsetvli a3, a3, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a3, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-NEXT:    addi a3, a0, 16
 ; LMULMAX1-NEXT:    vle64.v v26, (a3)
@@ -1262,8 +1187,7 @@ declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
 define void @fadd_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fadd_vf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfadd.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1279,8 +1203,7 @@ define void @fadd_vf_v8f16(<8 x half>* %x, half %y) {
 define void @fadd_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fadd_vf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfadd.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1296,8 +1219,7 @@ define void @fadd_vf_v4f32(<4 x float>* %x, float %y) {
 define void @fadd_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fadd_vf_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfadd.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1313,8 +1235,7 @@ define void @fadd_vf_v2f64(<2 x double>* %x, double %y) {
 define void @fadd_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fadd_fv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfadd.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1330,8 +1251,7 @@ define void @fadd_fv_v8f16(<8 x half>* %x, half %y) {
 define void @fadd_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fadd_fv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfadd.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1347,8 +1267,7 @@ define void @fadd_fv_v4f32(<4 x float>* %x, float %y) {
 define void @fadd_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fadd_fv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfadd.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1364,8 +1283,7 @@ define void @fadd_fv_v2f64(<2 x double>* %x, double %y) {
 define void @fsub_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fsub_vf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfsub.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1381,8 +1299,7 @@ define void @fsub_vf_v8f16(<8 x half>* %x, half %y) {
 define void @fsub_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fsub_vf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfsub.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1398,8 +1315,7 @@ define void @fsub_vf_v4f32(<4 x float>* %x, float %y) {
 define void @fsub_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fsub_vf_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfsub.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1415,8 +1331,7 @@ define void @fsub_vf_v2f64(<2 x double>* %x, double %y) {
 define void @fsub_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fsub_fv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfrsub.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1432,8 +1347,7 @@ define void @fsub_fv_v8f16(<8 x half>* %x, half %y) {
 define void @fsub_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fsub_fv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfrsub.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1449,8 +1363,7 @@ define void @fsub_fv_v4f32(<4 x float>* %x, float %y) {
 define void @fsub_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fsub_fv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfrsub.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1466,8 +1379,7 @@ define void @fsub_fv_v2f64(<2 x double>* %x, double %y) {
 define void @fmul_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fmul_vf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmul.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1483,8 +1395,7 @@ define void @fmul_vf_v8f16(<8 x half>* %x, half %y) {
 define void @fmul_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fmul_vf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfmul.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1500,8 +1411,7 @@ define void @fmul_vf_v4f32(<4 x float>* %x, float %y) {
 define void @fmul_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fmul_vf_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfmul.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1517,8 +1427,7 @@ define void @fmul_vf_v2f64(<2 x double>* %x, double %y) {
 define void @fmul_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fmul_fv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmul.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1534,8 +1443,7 @@ define void @fmul_fv_v8f16(<8 x half>* %x, half %y) {
 define void @fmul_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fmul_fv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfmul.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1551,8 +1459,7 @@ define void @fmul_fv_v4f32(<4 x float>* %x, float %y) {
 define void @fmul_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fmul_fv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfmul.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1568,8 +1475,7 @@ define void @fmul_fv_v2f64(<2 x double>* %x, double %y) {
 define void @fdiv_vf_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fdiv_vf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfdiv.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1585,8 +1491,7 @@ define void @fdiv_vf_v8f16(<8 x half>* %x, half %y) {
 define void @fdiv_vf_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fdiv_vf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfdiv.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1602,8 +1507,7 @@ define void @fdiv_vf_v4f32(<4 x float>* %x, float %y) {
 define void @fdiv_vf_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fdiv_vf_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfdiv.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1619,8 +1523,7 @@ define void @fdiv_vf_v2f64(<2 x double>* %x, double %y) {
 define void @fdiv_fv_v8f16(<8 x half>* %x, half %y) {
 ; CHECK-LABEL: fdiv_fv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfrdiv.vf v25, v25, fa0
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -1636,8 +1539,7 @@ define void @fdiv_fv_v8f16(<8 x half>* %x, half %y) {
 define void @fdiv_fv_v4f32(<4 x float>* %x, float %y) {
 ; CHECK-LABEL: fdiv_fv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vfrdiv.vf v25, v25, fa0
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -1653,8 +1555,7 @@ define void @fdiv_fv_v4f32(<4 x float>* %x, float %y) {
 define void @fdiv_fv_v2f64(<2 x double>* %x, double %y) {
 ; CHECK-LABEL: fdiv_fv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vfrdiv.vf v25, v25, fa0
 ; CHECK-NEXT:    vse64.v v25, (a0)
@@ -1670,8 +1571,7 @@ define void @fdiv_fv_v2f64(<2 x double>* %x, double %y) {
 define void @fma_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 ; CHECK-LABEL: fma_vf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vfmacc.vf v26, fa0, v25
@@ -1689,8 +1589,7 @@ define void @fma_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 define void @fma_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-LABEL: fma_vf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfmacc.vf v26, fa0, v25
@@ -1708,8 +1607,7 @@ define void @fma_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 define void @fma_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-LABEL: fma_vf_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfmacc.vf v26, fa0, v25
@@ -1727,8 +1625,7 @@ define void @fma_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 define void @fma_fv_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 ; CHECK-LABEL: fma_fv_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vfmacc.vf v26, fa0, v25
@@ -1746,8 +1643,7 @@ define void @fma_fv_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 define void @fma_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-LABEL: fma_fv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfmacc.vf v26, fa0, v25
@@ -1765,8 +1661,7 @@ define void @fma_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 define void @fma_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-LABEL: fma_fv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfmacc.vf v26, fa0, v25
@@ -1784,8 +1679,7 @@ define void @fma_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 define void @fmsub_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 ; CHECK-LABEL: fmsub_vf_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vfmsac.vf v26, fa0, v25
@@ -1804,8 +1698,7 @@ define void @fmsub_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
 define void @fnmsub_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-LABEL: fnmsub_vf_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfnmsac.vf v26, fa0, v25
@@ -1824,8 +1717,7 @@ define void @fnmsub_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 define void @fnmadd_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-LABEL: fnmadd_vf_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfnmacc.vf v26, fa0, v25
@@ -1845,8 +1737,7 @@ define void @fnmadd_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 define void @fnmsub_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 ; CHECK-LABEL: fnmsub_fv_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vfnmsac.vf v26, fa0, v25
@@ -1865,8 +1756,7 @@ define void @fnmsub_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
 define void @fnmadd_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
 ; CHECK-LABEL: fnmadd_fv_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vfnmacc.vf v26, fa0, v25

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 7086e5a6aa71..3d329c75ab38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -19,51 +19,49 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV32-NEXT:    andi sp, sp, -32
 ; RV32-NEXT:    sw a2, 32(sp)
 ; RV32-NEXT:    sw a1, 64(sp)
-; RV32-NEXT:    addi a1, zero, 8
-; RV32-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT:    vle32.v v26, (a0)
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a2, v26
-; RV32-NEXT:    sw a2, 0(sp)
-; RV32-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; RV32-NEXT:    addi a2, sp, 32
-; RV32-NEXT:    vle32.v v28, (a2)
+; RV32-NEXT:    vmv.x.s a1, v26
+; RV32-NEXT:    sw a1, 0(sp)
+; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; RV32-NEXT:    addi a1, sp, 32
+; RV32-NEXT:    vle32.v v28, (a1)
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a2, v28
-; RV32-NEXT:    sw a2, 28(sp)
-; RV32-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; RV32-NEXT:    addi a2, sp, 64
-; RV32-NEXT:    vle32.v v28, (a2)
+; RV32-NEXT:    vmv.x.s a1, v28
+; RV32-NEXT:    sw a1, 28(sp)
+; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
+; RV32-NEXT:    addi a1, sp, 64
+; RV32-NEXT:    vle32.v v28, (a1)
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a2, v28
-; RV32-NEXT:    sw a2, 24(sp)
-; RV32-NEXT:    addi a2, zero, 1
-; RV32-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT:    vmv.x.s a1, v28
+; RV32-NEXT:    sw a1, 24(sp)
+; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 5
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a3, v28
-; RV32-NEXT:    sw a3, 20(sp)
-; RV32-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT:    vmv.x.s a1, v28
+; RV32-NEXT:    sw a1, 20(sp)
+; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 4
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a3, v28
-; RV32-NEXT:    sw a3, 16(sp)
-; RV32-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT:    vmv.x.s a1, v28
+; RV32-NEXT:    sw a1, 16(sp)
+; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 3
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a3, v28
-; RV32-NEXT:    sw a3, 12(sp)
-; RV32-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT:    vmv.x.s a1, v28
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v28, v26, 2
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a3, v28
-; RV32-NEXT:    sw a3, 8(sp)
-; RV32-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; RV32-NEXT:    vmv.x.s a1, v28
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT:    vslidedown.vi v26, v26, 1
 ; RV32-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT:    vmv.x.s a2, v26
-; RV32-NEXT:    sw a2, 4(sp)
-; RV32-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT:    vmv.x.s a1, v26
+; RV32-NEXT:    sw a1, 4(sp)
+; RV32-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT:    vle32.v v26, (sp)
 ; RV32-NEXT:    vse32.v v26, (a0)
 ; RV32-NEXT:    addi sp, s0, -128
@@ -83,31 +81,29 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV64-NEXT:    addi s0, sp, 96
 ; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    andi sp, sp, -32
-; RV64-NEXT:    addi a2, zero, 4
-; RV64-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; RV64-NEXT:    vle64.v v26, (a0)
 ; RV64-NEXT:    sd a1, 32(sp)
 ; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a1, v26
 ; RV64-NEXT:    sd a1, 0(sp)
-; RV64-NEXT:    vsetvli a1, a2, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; RV64-NEXT:    addi a1, sp, 32
 ; RV64-NEXT:    vle64.v v28, (a1)
 ; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a1, v28
 ; RV64-NEXT:    sd a1, 24(sp)
-; RV64-NEXT:    addi a1, zero, 1
-; RV64-NEXT:    vsetvli a3, a1, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v28, v26, 2
 ; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
-; RV64-NEXT:    vmv.x.s a3, v28
-; RV64-NEXT:    sd a3, 16(sp)
-; RV64-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT:    vmv.x.s a1, v28
+; RV64-NEXT:    sd a1, 16(sp)
+; RV64-NEXT:    vsetivli a1, 1, e64,m2,ta,mu
 ; RV64-NEXT:    vslidedown.vi v26, v26, 1
 ; RV64-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT:    vmv.x.s a1, v26
 ; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    vsetvli a1, a2, e64,m2,ta,mu
+; RV64-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; RV64-NEXT:    vle64.v v26, (sp)
 ; RV64-NEXT:    vse64.v v26, (a0)
 ; RV64-NEXT:    addi sp, s0, -96

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 0b7991e23848..c862056d5510 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -5,8 +5,7 @@
 define void @buildvec_vid_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: buildvec_vid_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vid.v v25
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -17,8 +16,7 @@ define void @buildvec_vid_v16i8(<16 x i8>* %x) {
 define void @buildvec_vid_undefelts_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: buildvec_vid_undefelts_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vid.v v25
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -32,8 +30,7 @@ define void @buildvec_notquite_vid_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI2_0)
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -47,8 +44,7 @@ define void @buildvec_vid_plus_imm_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI3_0)
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -62,8 +58,7 @@ define void @buildvec_vid_mpy_imm_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI4_0)
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
index b9545bcf218f..cbabbb082e11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -9,8 +9,7 @@
 define void @seteq_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: seteq_vv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmseq.vv v0, v25, v26
@@ -83,8 +82,7 @@ define void @setlt_vv_v128i8(<128 x i8>* %x, <128 x i8>* %y, <128 x i1>* %z) {
 define void @setge_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: setge_vv_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmsle.vv v27, v26, v25
@@ -100,8 +98,7 @@ define void @setge_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
 define void @setle_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y, <16 x i1>* %z) {
 ; CHECK-LABEL: setle_vv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmsle.vv v27, v25, v26
@@ -168,8 +165,7 @@ define void @setuge_vv_v128i8(<128 x i8>* %x, <128 x i8>* %y, <128 x i1>* %z) {
 define void @setule_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
 ; CHECK-LABEL: setule_vv_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmsleu.vv v27, v25, v26
@@ -185,8 +181,7 @@ define void @setule_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
 define void @seteq_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-LABEL: seteq_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmseq.vx v26, v25, a1
 ; CHECK-NEXT:    vse1.v v26, (a2)
@@ -253,8 +248,7 @@ define void @setlt_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 define void @setge_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-LABEL: setge_vx_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
 ; CHECK-NEXT:    vmsle.vv v27, v26, v25
@@ -271,8 +265,7 @@ define void @setge_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 define void @setle_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-LABEL: setle_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmsle.vx v26, v25, a1
 ; CHECK-NEXT:    vse1.v v26, (a2)
@@ -340,8 +333,7 @@ define void @setuge_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 define void @setule_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-LABEL: setule_vx_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmsleu.vx v26, v25, a1
 ; CHECK-NEXT:    vse1.v v26, (a2)
@@ -357,8 +349,7 @@ define void @setule_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 define void @seteq_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-LABEL: seteq_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmseq.vx v26, v25, a1
 ; CHECK-NEXT:    vse1.v v26, (a2)
@@ -425,8 +416,7 @@ define void @setlt_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 define void @setge_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-LABEL: setge_xv_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmsle.vx v26, v25, a1
 ; CHECK-NEXT:    vse1.v v26, (a2)
@@ -442,8 +432,7 @@ define void @setge_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 define void @setle_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-LABEL: setle_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
 ; CHECK-NEXT:    vmsle.vv v27, v26, v25
@@ -511,8 +500,7 @@ define void @setuge_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 define void @setule_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-LABEL: setule_xv_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
 ; CHECK-NEXT:    vmsleu.vv v27, v26, v25
@@ -529,8 +517,7 @@ define void @setule_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 define void @seteq_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 ; CHECK-LABEL: seteq_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmseq.vi v26, v25, 0
 ; CHECK-NEXT:    vse1.v v26, (a1)
@@ -597,8 +584,7 @@ define void @setlt_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) {
 define void @setge_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 ; CHECK-LABEL: setge_vi_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.i v26, 0
 ; CHECK-NEXT:    vmsle.vv v27, v26, v25
@@ -615,8 +601,7 @@ define void @setge_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 define void @setle_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 ; CHECK-LABEL: setle_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmsle.vi v26, v25, 0
 ; CHECK-NEXT:    vse1.v v26, (a1)
@@ -686,8 +671,7 @@ define void @setuge_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) {
 define void @setule_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 ; CHECK-LABEL: setule_vi_v8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmsleu.vi v26, v25, 5
 ; CHECK-NEXT:    vse1.v v26, (a1)
@@ -703,8 +687,7 @@ define void @setule_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 define void @seteq_vv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: seteq_vv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmseq.vv v0, v25, v26
@@ -723,8 +706,7 @@ define void @seteq_vv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @setne_vv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: setne_vv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmsne.vv v0, v25, v26
@@ -743,8 +725,7 @@ define void @setne_vv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @setgt_vv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: setgt_vv_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vmslt.vv v0, v26, v25
@@ -763,8 +744,7 @@ define void @setgt_vv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @setlt_vv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; CHECK-LABEL: setlt_vv_v16i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vmslt.vv v0, v26, v28
@@ -783,8 +763,7 @@ define void @setlt_vv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @setugt_vv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; CHECK-LABEL: setugt_vv_v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vmsltu.vv v0, v28, v26
@@ -803,8 +782,7 @@ define void @setugt_vv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @setult_vv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; CHECK-LABEL: setult_vv_v4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a0)
 ; CHECK-NEXT:    vle64.v v28, (a1)
 ; CHECK-NEXT:    vmsltu.vv v0, v26, v28

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll
index 1755162b5337..2dc59a23f1fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv32.ll
@@ -5,8 +5,7 @@
 define void @splat_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: splat_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -19,8 +18,7 @@ define void @splat_v16i8(<16 x i8>* %x, i8 %y) {
 define void @splat_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: splat_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -33,8 +31,7 @@ define void @splat_v8i16(<8 x i16>* %x, i16 %y) {
 define void @splat_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: splat_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -63,8 +60,7 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 ;
 ; LMULMAX1-LABEL: splat_v32i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a2, zero, 16
-; LMULMAX1-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.x v25, a1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
@@ -79,16 +75,14 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
 ; LMULMAX2-LABEL: splat_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.x v26, a1
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a2, zero, 8
-; LMULMAX1-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.x v25, a1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -103,16 +97,14 @@ define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
 define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
 ; LMULMAX2-LABEL: splat_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.x v26, a1
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a2, zero, 4
-; LMULMAX1-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.x v25, a1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -135,8 +127,7 @@ define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
 define void @splat_zero_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: splat_zero_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -149,8 +140,7 @@ define void @splat_zero_v16i8(<16 x i8>* %x) {
 define void @splat_zero_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: splat_zero_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -163,8 +153,7 @@ define void @splat_zero_v8i16(<8 x i16>* %x) {
 define void @splat_zero_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: splat_zero_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -177,8 +166,7 @@ define void @splat_zero_v4i32(<4 x i32>* %x) {
 define void @splat_zero_v2i64(<2 x i64>* %x) {
 ; CHECK-LABEL: splat_zero_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -199,8 +187,7 @@ define void @splat_zero_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-LABEL: splat_zero_v32i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 16
-; LMULMAX1-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
@@ -215,16 +202,14 @@ define void @splat_zero_v32i8(<32 x i8>* %x) {
 define void @splat_zero_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 8
-; LMULMAX1-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -239,16 +224,14 @@ define void @splat_zero_v16i16(<16 x i16>* %x) {
 define void @splat_zero_v8i32(<8 x i32>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -263,16 +246,14 @@ define void @splat_zero_v8i32(<8 x i32>* %x) {
 define void @splat_zero_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v4i64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -287,8 +268,7 @@ define void @splat_zero_v4i64(<4 x i64>* %x) {
 define void @splat_allones_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: splat_allones_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -301,8 +281,7 @@ define void @splat_allones_v16i8(<16 x i8>* %x) {
 define void @splat_allones_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: splat_allones_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -315,8 +294,7 @@ define void @splat_allones_v8i16(<8 x i16>* %x) {
 define void @splat_allones_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: splat_allones_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -329,8 +307,7 @@ define void @splat_allones_v4i32(<4 x i32>* %x) {
 define void @splat_allones_v2i64(<2 x i64>* %x) {
 ; CHECK-LABEL: splat_allones_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -351,8 +328,7 @@ define void @splat_allones_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-LABEL: splat_allones_v32i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 16
-; LMULMAX1-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
@@ -367,16 +343,14 @@ define void @splat_allones_v32i8(<32 x i8>* %x) {
 define void @splat_allones_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-LABEL: splat_allones_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, -1
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_allones_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 8
-; LMULMAX1-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -391,16 +365,14 @@ define void @splat_allones_v16i16(<16 x i16>* %x) {
 define void @splat_allones_v8i32(<8 x i32>* %x) {
 ; LMULMAX2-LABEL: splat_allones_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, -1
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_allones_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -415,16 +387,14 @@ define void @splat_allones_v8i32(<8 x i32>* %x) {
 define void @splat_allones_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-LABEL: splat_allones_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, -1
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_allones_v4i64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -443,32 +413,28 @@ define void @splat_allones_v4i64(<4 x i64>* %x) {
 define void @splat_allones_with_use_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-LABEL: splat_allones_with_use_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v28, -1
-; LMULMAX2-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vadd.vv v26, v26, v28
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_allones_with_use_v4i64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 2
-; LMULMAX1-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-NEXT:    addi a2, a0, 16
-; LMULMAX1-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-NEXT:    addi a3, zero, 4
-; LMULMAX1-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    vle64.v v26, (a1)
+; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v27, -1
-; LMULMAX1-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vadd.vv v26, v26, v27
 ; LMULMAX1-NEXT:    vadd.vv v25, v25, v27
 ; LMULMAX1-NEXT:    vse64.v v25, (a0)
-; LMULMAX1-NEXT:    vse64.v v26, (a2)
+; LMULMAX1-NEXT:    vse64.v v26, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <4 x i64>, <4 x i64>* %x
   %b = add <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv64.ll
index 9cf5d75d5804..c18b8b1b7934 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat-rv64.ll
@@ -5,8 +5,7 @@
 define void @splat_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: splat_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -19,8 +18,7 @@ define void @splat_v16i8(<16 x i8>* %x, i8 %y) {
 define void @splat_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: splat_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -33,8 +31,7 @@ define void @splat_v8i16(<8 x i16>* %x, i16 %y) {
 define void @splat_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: splat_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -47,8 +44,7 @@ define void @splat_v4i32(<4 x i32>* %x, i32 %y) {
 define void @splat_v2i64(<2 x i64>* %x, i64 %y) {
 ; CHECK-LABEL: splat_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -69,8 +65,7 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 ;
 ; LMULMAX1-LABEL: splat_v32i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a2, zero, 16
-; LMULMAX1-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.x v25, a1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
@@ -85,16 +80,14 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
 ; LMULMAX2-LABEL: splat_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.x v26, a1
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a2, zero, 8
-; LMULMAX1-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.x v25, a1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -109,16 +102,14 @@ define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
 define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
 ; LMULMAX2-LABEL: splat_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.x v26, a1
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a2, zero, 4
-; LMULMAX1-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.x v25, a1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -133,16 +124,14 @@ define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
 define void @splat_v4i64(<4 x i64>* %x, i64 %y) {
 ; LMULMAX2-LABEL: splat_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.x v26, a1
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_v4i64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a2, zero, 2
-; LMULMAX1-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.x v25, a1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse64.v v25, (a1)
@@ -157,8 +146,7 @@ define void @splat_v4i64(<4 x i64>* %x, i64 %y) {
 define void @splat_zero_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: splat_zero_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -171,8 +159,7 @@ define void @splat_zero_v16i8(<16 x i8>* %x) {
 define void @splat_zero_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: splat_zero_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -185,8 +172,7 @@ define void @splat_zero_v8i16(<8 x i16>* %x) {
 define void @splat_zero_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: splat_zero_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -199,8 +185,7 @@ define void @splat_zero_v4i32(<4 x i32>* %x) {
 define void @splat_zero_v2i64(<2 x i64>* %x) {
 ; CHECK-LABEL: splat_zero_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -221,8 +206,7 @@ define void @splat_zero_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-LABEL: splat_zero_v32i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 16
-; LMULMAX1-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
@@ -237,16 +221,14 @@ define void @splat_zero_v32i8(<32 x i8>* %x) {
 define void @splat_zero_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 8
-; LMULMAX1-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -261,16 +243,14 @@ define void @splat_zero_v16i16(<16 x i16>* %x) {
 define void @splat_zero_v8i32(<8 x i32>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -285,16 +265,14 @@ define void @splat_zero_v8i32(<8 x i32>* %x) {
 define void @splat_zero_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-LABEL: splat_zero_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 4
-; LMULMAX2-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, 0
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_zero_v4i64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 2
-; LMULMAX1-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse64.v v25, (a1)
@@ -309,8 +287,7 @@ define void @splat_zero_v4i64(<4 x i64>* %x) {
 define void @splat_allones_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: splat_allones_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -323,8 +300,7 @@ define void @splat_allones_v16i8(<16 x i8>* %x) {
 define void @splat_allones_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: splat_allones_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -337,8 +313,7 @@ define void @splat_allones_v8i16(<8 x i16>* %x) {
 define void @splat_allones_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: splat_allones_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -351,8 +326,7 @@ define void @splat_allones_v4i32(<4 x i32>* %x) {
 define void @splat_allones_v2i64(<2 x i64>* %x) {
 ; CHECK-LABEL: splat_allones_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -373,8 +347,7 @@ define void @splat_allones_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-LABEL: splat_allones_v32i8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 16
-; LMULMAX1-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse8.v v25, (a1)
@@ -389,16 +362,14 @@ define void @splat_allones_v32i8(<32 x i8>* %x) {
 define void @splat_allones_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-LABEL: splat_allones_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, -1
 ; LMULMAX2-NEXT:    vse16.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_allones_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 8
-; LMULMAX1-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
@@ -413,16 +384,14 @@ define void @splat_allones_v16i16(<16 x i16>* %x) {
 define void @splat_allones_v8i32(<8 x i32>* %x) {
 ; LMULMAX2-LABEL: splat_allones_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, -1
 ; LMULMAX2-NEXT:    vse32.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_allones_v8i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 4
-; LMULMAX1-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse32.v v25, (a1)
@@ -437,16 +406,14 @@ define void @splat_allones_v8i32(<8 x i32>* %x) {
 define void @splat_allones_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-LABEL: splat_allones_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 4
-; LMULMAX2-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vmv.v.i v26, -1
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: splat_allones_v4i64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, zero, 2
-; LMULMAX1-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v25, -1
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vse64.v v25, (a1)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
index d124633d0b16..39ea974b6718 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -7,8 +7,7 @@
 define void @gather_const_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: gather_const_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vrgather.vi v26, v25, 12
 ; CHECK-NEXT:    vse8.v v26, (a0)
@@ -24,8 +23,7 @@ define void @gather_const_v16i8(<16 x i8>* %x) {
 define void @gather_const_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: gather_const_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vrgather.vi v26, v25, 5
 ; CHECK-NEXT:    vse16.v v26, (a0)
@@ -41,8 +39,7 @@ define void @gather_const_v8i16(<8 x i16>* %x) {
 define void @gather_const_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: gather_const_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vrgather.vi v26, v25, 3
 ; CHECK-NEXT:    vse32.v v26, (a0)
@@ -58,8 +55,7 @@ define void @gather_const_v4i32(<4 x i32>* %x) {
 define void @gather_const_v2i64(<2 x i64>* %x) {
 ; CHECK-LABEL: gather_const_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vrgather.vi v26, v25, 1
 ; CHECK-NEXT:    vse64.v v26, (a0)
@@ -86,8 +82,7 @@ define void @gather_const_v64i8(<64 x i8>* %x) {
 ; LMULMAX1-LABEL: gather_const_v64i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 32
-; LMULMAX1-NEXT:    addi a2, zero, 16
-; LMULMAX1-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-NEXT:    vle8.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a2, a0, 16
 ; LMULMAX1-NEXT:    addi a3, a0, 48
@@ -118,8 +113,7 @@ define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX1-LABEL: gather_const_v16i16:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 48
-; LMULMAX1-NEXT:    addi a2, zero, 8
-; LMULMAX1-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a2, a0, 16
 ; LMULMAX1-NEXT:    addi a3, a0, 32
@@ -140,8 +134,7 @@ define void @gather_const_v16i16(<32 x i16>* %x) {
 define void @gather_const_v16i32(<16 x i32>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, zero, 16
-; LMULMAX4-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; LMULMAX4-NEXT:    vsetivli a1, 16, e32,m4,ta,mu
 ; LMULMAX4-NEXT:    vle32.v v28, (a0)
 ; LMULMAX4-NEXT:    vrgather.vi v8, v28, 9
 ; LMULMAX4-NEXT:    vse32.v v8, (a0)
@@ -150,8 +143,7 @@ define void @gather_const_v16i32(<16 x i32>* %x) {
 ; LMULMAX1-LABEL: gather_const_v16i32:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 32
-; LMULMAX1-NEXT:    addi a2, zero, 4
-; LMULMAX1-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a2, a0, 16
 ; LMULMAX1-NEXT:    addi a3, a0, 48
@@ -172,8 +164,7 @@ define void @gather_const_v16i32(<16 x i32>* %x) {
 define void @gather_const_v8i64(<8 x i64>* %x) {
 ; LMULMAX4-LABEL: gather_const_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, zero, 8
-; LMULMAX4-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; LMULMAX4-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
 ; LMULMAX4-NEXT:    vle64.v v28, (a0)
 ; LMULMAX4-NEXT:    vrgather.vi v8, v28, 3
 ; LMULMAX4-NEXT:    vse64.v v8, (a0)
@@ -182,8 +173,7 @@ define void @gather_const_v8i64(<8 x i64>* %x) {
 ; LMULMAX1-LABEL: gather_const_v8i64:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, zero, 2
-; LMULMAX1-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-NEXT:    vle64.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    addi a3, a0, 32

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 6a8b0611c248..35ac5e212793 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -7,8 +7,7 @@
 define void @add_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: add_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -24,8 +23,7 @@ define void @add_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @add_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: add_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -41,8 +39,7 @@ define void @add_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @add_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: add_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -58,8 +55,7 @@ define void @add_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: add_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -75,8 +71,7 @@ define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @sub_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: sub_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vsub.vv v25, v25, v26
@@ -92,8 +87,7 @@ define void @sub_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @sub_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: sub_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vsub.vv v25, v25, v26
@@ -109,8 +103,7 @@ define void @sub_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @sub_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: sub_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vsub.vv v25, v25, v26
@@ -126,8 +119,7 @@ define void @sub_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @sub_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: sub_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vsub.vv v25, v25, v26
@@ -143,8 +135,7 @@ define void @sub_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @mul_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: mul_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmul.vv v25, v25, v26
@@ -160,8 +151,7 @@ define void @mul_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @mul_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: mul_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmul.vv v25, v25, v26
@@ -177,8 +167,7 @@ define void @mul_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @mul_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: mul_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmul.vv v25, v25, v26
@@ -194,8 +183,7 @@ define void @mul_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @mul_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: mul_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vmul.vv v25, v25, v26
@@ -211,8 +199,7 @@ define void @mul_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @and_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: and_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vand.vv v25, v25, v26
@@ -228,8 +215,7 @@ define void @and_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @and_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: and_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vand.vv v25, v25, v26
@@ -245,8 +231,7 @@ define void @and_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @and_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: and_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vand.vv v25, v25, v26
@@ -262,8 +247,7 @@ define void @and_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @and_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: and_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vand.vv v25, v25, v26
@@ -279,8 +263,7 @@ define void @and_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @or_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: or_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vor.vv v25, v25, v26
@@ -296,8 +279,7 @@ define void @or_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @or_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: or_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vor.vv v25, v25, v26
@@ -313,8 +295,7 @@ define void @or_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @or_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: or_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vor.vv v25, v25, v26
@@ -330,8 +311,7 @@ define void @or_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @or_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: or_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vor.vv v25, v25, v26
@@ -347,8 +327,7 @@ define void @or_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @xor_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: xor_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vxor.vv v25, v25, v26
@@ -364,8 +343,7 @@ define void @xor_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @xor_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: xor_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vxor.vv v25, v25, v26
@@ -381,8 +359,7 @@ define void @xor_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @xor_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: xor_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vxor.vv v25, v25, v26
@@ -398,8 +375,7 @@ define void @xor_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @xor_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: xor_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vxor.vv v25, v25, v26
@@ -415,8 +391,7 @@ define void @xor_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @lshr_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: lshr_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vsrl.vv v25, v25, v26
@@ -432,8 +407,7 @@ define void @lshr_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @lshr_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: lshr_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vsrl.vv v25, v25, v26
@@ -449,8 +423,7 @@ define void @lshr_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @lshr_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: lshr_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vsrl.vv v25, v25, v26
@@ -466,8 +439,7 @@ define void @lshr_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @lshr_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: lshr_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vsrl.vv v25, v25, v26
@@ -483,8 +455,7 @@ define void @lshr_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @ashr_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: ashr_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vsra.vv v25, v25, v26
@@ -500,8 +471,7 @@ define void @ashr_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @ashr_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: ashr_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vsra.vv v25, v25, v26
@@ -517,8 +487,7 @@ define void @ashr_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @ashr_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: ashr_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vsra.vv v25, v25, v26
@@ -534,8 +503,7 @@ define void @ashr_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @ashr_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: ashr_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vsra.vv v25, v25, v26
@@ -551,8 +519,7 @@ define void @ashr_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @shl_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: shl_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vsll.vv v25, v25, v26
@@ -568,8 +535,7 @@ define void @shl_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @shl_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: shl_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vsll.vv v25, v25, v26
@@ -585,8 +551,7 @@ define void @shl_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @shl_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: shl_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vsll.vv v25, v25, v26
@@ -602,8 +567,7 @@ define void @shl_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @shl_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: shl_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vsll.vv v25, v25, v26
@@ -619,8 +583,7 @@ define void @shl_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @sdiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: sdiv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vdiv.vv v25, v25, v26
@@ -636,8 +599,7 @@ define void @sdiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @sdiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: sdiv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vdiv.vv v25, v25, v26
@@ -653,8 +615,7 @@ define void @sdiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @sdiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: sdiv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vdiv.vv v25, v25, v26
@@ -670,8 +631,7 @@ define void @sdiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @sdiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: sdiv_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vdiv.vv v25, v25, v26
@@ -687,8 +647,7 @@ define void @sdiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @srem_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: srem_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vrem.vv v25, v25, v26
@@ -704,8 +663,7 @@ define void @srem_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @srem_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: srem_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vrem.vv v25, v25, v26
@@ -721,8 +679,7 @@ define void @srem_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @srem_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: srem_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vrem.vv v25, v25, v26
@@ -738,8 +695,7 @@ define void @srem_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @srem_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: srem_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vrem.vv v25, v25, v26
@@ -755,8 +711,7 @@ define void @srem_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @udiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: udiv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vdivu.vv v25, v25, v26
@@ -772,8 +727,7 @@ define void @udiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @udiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: udiv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vdivu.vv v25, v25, v26
@@ -789,8 +743,7 @@ define void @udiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @udiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: udiv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vdivu.vv v25, v25, v26
@@ -806,8 +759,7 @@ define void @udiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @udiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: udiv_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vdivu.vv v25, v25, v26
@@ -823,8 +775,7 @@ define void @udiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @urem_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: urem_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vremu.vv v25, v25, v26
@@ -840,8 +791,7 @@ define void @urem_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @urem_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: urem_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vremu.vv v25, v25, v26
@@ -857,8 +807,7 @@ define void @urem_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @urem_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: urem_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vremu.vv v25, v25, v26
@@ -874,8 +823,7 @@ define void @urem_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @urem_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: urem_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vremu.vv v25, v25, v26
@@ -891,8 +839,7 @@ define void @urem_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @mulhu_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: mulhu_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI52_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI52_0)
@@ -923,8 +870,7 @@ define void @mulhu_v16i8(<16 x i8>* %x) {
 define void @mulhu_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: mulhu_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI53_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI53_0)
@@ -955,8 +901,7 @@ define void @mulhu_v8i16(<8 x i16>* %x) {
 define void @mulhu_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: mulhu_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI54_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI54_0)
@@ -983,29 +928,26 @@ define void @mulhu_v4i32(<4 x i32>* %x) {
 define void @mulhu_v2i64(<2 x i64>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhu_v2i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI55_0)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI55_0)
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI55_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI55_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmulhu.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI55_1)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI55_1)
-; LMULMAX1-RV32-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI55_1)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI55_1)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vsrl.vv v25, v25, v26
 ; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_v2i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, %hi(.LCPI55_0)
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, %lo(.LCPI55_0)
@@ -1026,8 +968,7 @@ define void @mulhu_v2i64(<2 x i64>* %x) {
 define void @mulhs_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: mulhs_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI56_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI56_0)
@@ -1048,8 +989,7 @@ define void @mulhs_v16i8(<16 x i8>* %x) {
 define void @mulhs_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: mulhs_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    lui a1, %hi(.LCPI57_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI57_0)
@@ -1069,8 +1009,7 @@ define void @mulhs_v8i16(<8 x i16>* %x) {
 define void @mulhs_v4i32(<4 x i32>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhs_v4i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI58_0)
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI58_0)
@@ -1084,8 +1023,7 @@ define void @mulhs_v4i32(<4 x i32>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_v4i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, %hi(.LCPI58_0)
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, %lo(.LCPI58_0)
@@ -1105,34 +1043,32 @@ define void @mulhs_v4i32(<4 x i32>* %x) {
 define void @mulhs_v2i64(<2 x i64>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhs_v2i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI59_0)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI59_0)
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI59_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI59_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmul.vv v26, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI59_1)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI59_1)
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v27, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI59_1)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI59_1)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v27, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmulh.vv v25, v25, v27
 ; LMULMAX1-RV32-NEXT:    vadd.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI59_2)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI59_2)
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI59_2)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI59_2)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vsrl.vv v26, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI59_3)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI59_3)
-; LMULMAX1-RV32-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v27, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI59_3)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI59_3)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v27, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vsra.vv v25, v25, v27
 ; LMULMAX1-RV32-NEXT:    vadd.vv v25, v25, v26
 ; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
@@ -1140,8 +1076,7 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_v2i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, %hi(.LCPI59_0)
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, %lo(.LCPI59_0)
@@ -1168,8 +1103,7 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 define void @smin_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: smin_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmin.vv v25, v25, v26
@@ -1186,8 +1120,7 @@ define void @smin_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @smin_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: smin_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmin.vv v25, v25, v26
@@ -1204,8 +1137,7 @@ define void @smin_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @smin_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: smin_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmin.vv v25, v25, v26
@@ -1222,8 +1154,7 @@ define void @smin_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @smin_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: smin_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vmin.vv v25, v25, v26
@@ -1240,8 +1171,7 @@ define void @smin_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @smax_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: smax_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmax.vv v25, v25, v26
@@ -1258,8 +1188,7 @@ define void @smax_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @smax_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: smax_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmax.vv v25, v25, v26
@@ -1276,8 +1205,7 @@ define void @smax_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @smax_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: smax_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmax.vv v25, v25, v26
@@ -1294,8 +1222,7 @@ define void @smax_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @smax_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: smax_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vmax.vv v25, v25, v26
@@ -1312,8 +1239,7 @@ define void @smax_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @umin_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: umin_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vminu.vv v25, v25, v26
@@ -1330,8 +1256,7 @@ define void @umin_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @umin_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: umin_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vminu.vv v25, v25, v26
@@ -1348,8 +1273,7 @@ define void @umin_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @umin_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: umin_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vminu.vv v25, v25, v26
@@ -1366,8 +1290,7 @@ define void @umin_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @umin_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: umin_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vminu.vv v25, v25, v26
@@ -1384,8 +1307,7 @@ define void @umin_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @umax_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: umax_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
 ; CHECK-NEXT:    vmaxu.vv v25, v25, v26
@@ -1402,8 +1324,7 @@ define void @umax_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @umax_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: umax_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmaxu.vv v25, v25, v26
@@ -1420,8 +1341,7 @@ define void @umax_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 define void @umax_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: umax_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmaxu.vv v25, v25, v26
@@ -1438,8 +1358,7 @@ define void @umax_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 define void @umax_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: umax_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a0)
 ; CHECK-NEXT:    vle64.v v26, (a1)
 ; CHECK-NEXT:    vmaxu.vv v25, v25, v26
@@ -1466,8 +1385,7 @@ define void @add_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: add_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -1482,8 +1400,7 @@ define void @add_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: add_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -1505,8 +1422,7 @@ define void @add_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @add_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: add_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vadd.vv v26, v26, v28
@@ -1515,8 +1431,7 @@ define void @add_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: add_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -1531,8 +1446,7 @@ define void @add_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: add_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -1554,8 +1468,7 @@ define void @add_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @add_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: add_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vadd.vv v26, v26, v28
@@ -1564,8 +1477,7 @@ define void @add_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: add_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -1580,8 +1492,7 @@ define void @add_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: add_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -1603,8 +1514,7 @@ define void @add_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @add_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: add_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vadd.vv v26, v26, v28
@@ -1613,8 +1523,7 @@ define void @add_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: add_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -1629,8 +1538,7 @@ define void @add_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: add_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -1662,8 +1570,7 @@ define void @sub_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sub_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -1678,8 +1585,7 @@ define void @sub_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sub_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -1701,8 +1607,7 @@ define void @sub_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @sub_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: sub_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vsub.vv v26, v26, v28
@@ -1711,8 +1616,7 @@ define void @sub_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sub_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -1727,8 +1631,7 @@ define void @sub_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sub_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -1750,8 +1653,7 @@ define void @sub_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @sub_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: sub_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vsub.vv v26, v26, v28
@@ -1760,8 +1662,7 @@ define void @sub_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sub_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -1776,8 +1677,7 @@ define void @sub_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sub_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -1799,8 +1699,7 @@ define void @sub_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @sub_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: sub_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vsub.vv v26, v26, v28
@@ -1809,8 +1708,7 @@ define void @sub_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sub_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -1825,8 +1723,7 @@ define void @sub_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sub_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -1858,8 +1755,7 @@ define void @mul_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: mul_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -1874,8 +1770,7 @@ define void @mul_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: mul_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -1897,8 +1792,7 @@ define void @mul_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @mul_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: mul_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vmul.vv v26, v26, v28
@@ -1907,8 +1801,7 @@ define void @mul_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: mul_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -1923,8 +1816,7 @@ define void @mul_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: mul_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -1946,8 +1838,7 @@ define void @mul_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @mul_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: mul_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vmul.vv v26, v26, v28
@@ -1956,8 +1847,7 @@ define void @mul_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: mul_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -1972,8 +1862,7 @@ define void @mul_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: mul_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -1995,8 +1884,7 @@ define void @mul_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @mul_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: mul_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vmul.vv v26, v26, v28
@@ -2005,8 +1893,7 @@ define void @mul_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: mul_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -2021,8 +1908,7 @@ define void @mul_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: mul_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -2054,8 +1940,7 @@ define void @and_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: and_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -2070,8 +1955,7 @@ define void @and_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: and_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -2093,8 +1977,7 @@ define void @and_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @and_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: and_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vand.vv v26, v26, v28
@@ -2103,8 +1986,7 @@ define void @and_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: and_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -2119,8 +2001,7 @@ define void @and_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: and_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -2142,8 +2023,7 @@ define void @and_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @and_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: and_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vand.vv v26, v26, v28
@@ -2152,8 +2032,7 @@ define void @and_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: and_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -2168,8 +2047,7 @@ define void @and_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: and_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -2191,8 +2069,7 @@ define void @and_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @and_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: and_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vand.vv v26, v26, v28
@@ -2201,8 +2078,7 @@ define void @and_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: and_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -2217,8 +2093,7 @@ define void @and_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: and_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -2250,8 +2125,7 @@ define void @or_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: or_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -2266,8 +2140,7 @@ define void @or_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: or_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -2289,8 +2162,7 @@ define void @or_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @or_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: or_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vor.vv v26, v26, v28
@@ -2299,8 +2171,7 @@ define void @or_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: or_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -2315,8 +2186,7 @@ define void @or_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: or_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -2338,8 +2208,7 @@ define void @or_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @or_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: or_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vor.vv v26, v26, v28
@@ -2348,8 +2217,7 @@ define void @or_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: or_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -2364,8 +2232,7 @@ define void @or_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: or_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -2387,8 +2254,7 @@ define void @or_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @or_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: or_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vor.vv v26, v26, v28
@@ -2397,8 +2263,7 @@ define void @or_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: or_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -2413,8 +2278,7 @@ define void @or_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: or_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -2446,8 +2310,7 @@ define void @xor_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: xor_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -2462,8 +2325,7 @@ define void @xor_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: xor_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -2485,8 +2347,7 @@ define void @xor_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @xor_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: xor_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vxor.vv v26, v26, v28
@@ -2495,8 +2356,7 @@ define void @xor_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: xor_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -2511,8 +2371,7 @@ define void @xor_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: xor_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -2534,8 +2393,7 @@ define void @xor_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @xor_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: xor_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vxor.vv v26, v26, v28
@@ -2544,8 +2402,7 @@ define void @xor_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: xor_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -2560,8 +2417,7 @@ define void @xor_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: xor_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -2583,8 +2439,7 @@ define void @xor_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @xor_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: xor_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vxor.vv v26, v26, v28
@@ -2593,8 +2448,7 @@ define void @xor_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: xor_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -2609,8 +2463,7 @@ define void @xor_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: xor_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -2642,8 +2495,7 @@ define void @lshr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: lshr_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -2658,8 +2510,7 @@ define void @lshr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: lshr_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -2681,8 +2532,7 @@ define void @lshr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @lshr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: lshr_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vsrl.vv v26, v26, v28
@@ -2691,8 +2541,7 @@ define void @lshr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: lshr_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -2707,8 +2556,7 @@ define void @lshr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: lshr_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -2730,8 +2578,7 @@ define void @lshr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @lshr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: lshr_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vsrl.vv v26, v26, v28
@@ -2740,8 +2587,7 @@ define void @lshr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: lshr_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -2756,8 +2602,7 @@ define void @lshr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: lshr_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -2779,8 +2624,7 @@ define void @lshr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @lshr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: lshr_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vsrl.vv v26, v26, v28
@@ -2789,8 +2633,7 @@ define void @lshr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: lshr_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -2805,8 +2648,7 @@ define void @lshr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: lshr_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -2838,8 +2680,7 @@ define void @ashr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: ashr_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -2854,8 +2695,7 @@ define void @ashr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: ashr_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -2877,8 +2717,7 @@ define void @ashr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @ashr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: ashr_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vsra.vv v26, v26, v28
@@ -2887,8 +2726,7 @@ define void @ashr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: ashr_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -2903,8 +2741,7 @@ define void @ashr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: ashr_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -2926,8 +2763,7 @@ define void @ashr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @ashr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: ashr_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vsra.vv v26, v26, v28
@@ -2936,8 +2772,7 @@ define void @ashr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: ashr_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -2952,8 +2787,7 @@ define void @ashr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: ashr_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -2975,8 +2809,7 @@ define void @ashr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @ashr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: ashr_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vsra.vv v26, v26, v28
@@ -2985,8 +2818,7 @@ define void @ashr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: ashr_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -3001,8 +2833,7 @@ define void @ashr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: ashr_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -3034,8 +2865,7 @@ define void @shl_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: shl_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -3050,8 +2880,7 @@ define void @shl_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: shl_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -3073,8 +2902,7 @@ define void @shl_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @shl_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: shl_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vsll.vv v26, v26, v28
@@ -3083,8 +2911,7 @@ define void @shl_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: shl_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -3099,8 +2926,7 @@ define void @shl_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: shl_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -3122,8 +2948,7 @@ define void @shl_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @shl_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: shl_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vsll.vv v26, v26, v28
@@ -3132,8 +2957,7 @@ define void @shl_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: shl_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -3148,8 +2972,7 @@ define void @shl_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: shl_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -3171,8 +2994,7 @@ define void @shl_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @shl_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: shl_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vsll.vv v26, v26, v28
@@ -3181,8 +3003,7 @@ define void @shl_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: shl_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -3197,8 +3018,7 @@ define void @shl_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: shl_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -3230,8 +3050,7 @@ define void @sdiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sdiv_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -3246,8 +3065,7 @@ define void @sdiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sdiv_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -3269,8 +3087,7 @@ define void @sdiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @sdiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: sdiv_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vdiv.vv v26, v26, v28
@@ -3279,8 +3096,7 @@ define void @sdiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sdiv_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -3295,8 +3111,7 @@ define void @sdiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sdiv_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -3318,8 +3133,7 @@ define void @sdiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @sdiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: sdiv_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vdiv.vv v26, v26, v28
@@ -3328,8 +3142,7 @@ define void @sdiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sdiv_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -3344,8 +3157,7 @@ define void @sdiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sdiv_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -3367,8 +3179,7 @@ define void @sdiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @sdiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: sdiv_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vdiv.vv v26, v26, v28
@@ -3377,8 +3188,7 @@ define void @sdiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: sdiv_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -3393,8 +3203,7 @@ define void @sdiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: sdiv_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -3426,8 +3235,7 @@ define void @srem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: srem_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -3442,8 +3250,7 @@ define void @srem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: srem_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -3465,8 +3272,7 @@ define void @srem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @srem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: srem_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vrem.vv v26, v26, v28
@@ -3475,8 +3281,7 @@ define void @srem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: srem_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -3491,8 +3296,7 @@ define void @srem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: srem_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -3514,8 +3318,7 @@ define void @srem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @srem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: srem_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vrem.vv v26, v26, v28
@@ -3524,8 +3327,7 @@ define void @srem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: srem_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -3540,8 +3342,7 @@ define void @srem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: srem_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -3563,8 +3364,7 @@ define void @srem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @srem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: srem_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vrem.vv v26, v26, v28
@@ -3573,8 +3373,7 @@ define void @srem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: srem_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -3589,8 +3388,7 @@ define void @srem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: srem_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -3622,8 +3420,7 @@ define void @udiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: udiv_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -3638,8 +3435,7 @@ define void @udiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: udiv_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -3661,8 +3457,7 @@ define void @udiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @udiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: udiv_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vdivu.vv v26, v26, v28
@@ -3671,8 +3466,7 @@ define void @udiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: udiv_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -3687,8 +3481,7 @@ define void @udiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: udiv_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -3710,8 +3503,7 @@ define void @udiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @udiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: udiv_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vdivu.vv v26, v26, v28
@@ -3720,8 +3512,7 @@ define void @udiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: udiv_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -3736,8 +3527,7 @@ define void @udiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: udiv_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -3759,8 +3549,7 @@ define void @udiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @udiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: udiv_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vdivu.vv v26, v26, v28
@@ -3769,8 +3558,7 @@ define void @udiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: udiv_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -3785,8 +3573,7 @@ define void @udiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: udiv_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -3818,8 +3605,7 @@ define void @urem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: urem_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -3834,8 +3620,7 @@ define void @urem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: urem_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -3857,8 +3642,7 @@ define void @urem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @urem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: urem_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vremu.vv v26, v26, v28
@@ -3867,8 +3651,7 @@ define void @urem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: urem_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -3883,8 +3666,7 @@ define void @urem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: urem_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -3906,8 +3688,7 @@ define void @urem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @urem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: urem_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vremu.vv v26, v26, v28
@@ -3916,8 +3697,7 @@ define void @urem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: urem_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -3932,8 +3712,7 @@ define void @urem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: urem_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -3955,8 +3734,7 @@ define void @urem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @urem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: urem_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vremu.vv v26, v26, v28
@@ -3965,8 +3743,7 @@ define void @urem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: urem_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -3981,8 +3758,7 @@ define void @urem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: urem_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -4004,47 +3780,44 @@ define void @urem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @extract_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: extract_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
-; LMULMAX2-NEXT:    vsetvli a1, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vadd.vv v26, v26, v28
 ; LMULMAX2-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-RV32-LABEL: extract_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    addi a3, a0, 16
-; LMULMAX1-RV32-NEXT:    vle64.v v26, (a3)
+; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
+; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
 ; LMULMAX1-RV32-NEXT:    vle64.v v27, (a1)
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v28, (a1)
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vadd.vv v26, v26, v28
 ; LMULMAX1-RV32-NEXT:    vadd.vv v25, v25, v27
 ; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    vse64.v v26, (a3)
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a2)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: extract_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV64-NEXT:    addi a3, a0, 16
-; LMULMAX1-RV64-NEXT:    vle64.v v26, (a3)
+; LMULMAX1-RV64-NEXT:    addi a2, a0, 16
+; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
 ; LMULMAX1-RV64-NEXT:    vle64.v v27, (a1)
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v28, (a1)
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vadd.vv v26, v26, v28
 ; LMULMAX1-RV64-NEXT:    vadd.vv v25, v25, v27
 ; LMULMAX1-RV64-NEXT:    vse64.v v25, (a0)
-; LMULMAX1-RV64-NEXT:    vse64.v v26, (a3)
+; LMULMAX1-RV64-NEXT:    vse64.v v26, (a2)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <4 x i64>, <4 x i64>* %x
   %b = load <4 x i64>, <4 x i64>* %y
@@ -4084,8 +3857,7 @@ define void @mulhu_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-RV32-LABEL: mulhu_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI129_0)
@@ -4100,8 +3872,7 @@ define void @mulhu_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI129_0)
@@ -4122,8 +3893,7 @@ define void @mulhu_v32i8(<32 x i8>* %x) {
 define void @mulhu_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-LABEL: mulhu_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    lui a1, %hi(.LCPI130_0)
 ; LMULMAX2-NEXT:    addi a1, a1, %lo(.LCPI130_0)
@@ -4148,8 +3918,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ;
 ; LMULMAX1-RV32-LABEL: mulhu_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI130_0)
@@ -4164,8 +3933,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI130_0)
@@ -4186,8 +3954,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 define void @mulhu_v8i32(<8 x i32>* %x) {
 ; LMULMAX2-LABEL: mulhu_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 8
-; LMULMAX2-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    lui a1, %hi(.LCPI131_0)
 ; LMULMAX2-NEXT:    addi a1, a1, %lo(.LCPI131_0)
@@ -4208,8 +3975,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 ;
 ; LMULMAX1-RV32-LABEL: mulhu_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI131_0)
@@ -4238,8 +4004,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI131_0)
@@ -4260,32 +4025,29 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhu_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
-; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    lui a3, %hi(.LCPI132_0)
-; LMULMAX1-RV32-NEXT:    addi a3, a3, %lo(.LCPI132_0)
-; LMULMAX1-RV32-NEXT:    addi a4, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a5, a4, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v27, (a3)
-; LMULMAX1-RV32-NEXT:    vsetvli a3, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
+; LMULMAX1-RV32-NEXT:    vle64.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI132_0)
+; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI132_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v27, (a2)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vdivu.vv v26, v26, v27
-; LMULMAX1-RV32-NEXT:    lui a3, %hi(.LCPI132_1)
-; LMULMAX1-RV32-NEXT:    addi a3, a3, %lo(.LCPI132_1)
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a4, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v27, (a3)
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI132_1)
+; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI132_1)
+; LMULMAX1-RV32-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v27, (a2)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vdivu.vv v25, v25, v27
 ; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    vse64.v v26, (a2)
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a1)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI132_0)
@@ -4339,8 +4101,7 @@ define void @mulhs_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-RV32-LABEL: mulhs_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI133_0)
@@ -4355,8 +4116,7 @@ define void @mulhs_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI133_0)
@@ -4377,8 +4137,7 @@ define void @mulhs_v32i8(<32 x i8>* %x) {
 define void @mulhs_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-LABEL: mulhs_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 16
-; LMULMAX2-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    lui a1, %hi(.LCPI134_0)
 ; LMULMAX2-NEXT:    addi a1, a1, %lo(.LCPI134_0)
@@ -4392,8 +4151,7 @@ define void @mulhs_v16i16(<16 x i16>* %x) {
 ;
 ; LMULMAX1-RV32-LABEL: mulhs_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI134_0)
@@ -4408,8 +4166,7 @@ define void @mulhs_v16i16(<16 x i16>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI134_0)
@@ -4430,8 +4187,7 @@ define void @mulhs_v16i16(<16 x i16>* %x) {
 define void @mulhs_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhs_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI135_0)
@@ -4452,8 +4208,7 @@ define void @mulhs_v8i32(<8 x i32>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI135_0)
@@ -4474,27 +4229,24 @@ define void @mulhs_v8i32(<8 x i32>* %x) {
 define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhs_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
-; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    lui a3, %hi(.LCPI136_0)
-; LMULMAX1-RV32-NEXT:    addi a3, a3, %lo(.LCPI136_0)
-; LMULMAX1-RV32-NEXT:    addi a4, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a4, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v27, (a3)
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
+; LMULMAX1-RV32-NEXT:    vle64.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI136_0)
+; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI136_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a3, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v27, (a2)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vdiv.vv v26, v26, v27
 ; LMULMAX1-RV32-NEXT:    vdiv.vv v25, v25, v27
 ; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    vse64.v v26, (a2)
+; LMULMAX1-RV32-NEXT:    vse64.v v26, (a1)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    lui a2, %hi(.LCPI136_0)
@@ -4540,8 +4292,7 @@ define void @smin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smin_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -4556,8 +4307,7 @@ define void @smin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smin_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -4580,8 +4330,7 @@ define void @smin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @smin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: smin_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vmin.vv v26, v26, v28
@@ -4590,8 +4339,7 @@ define void @smin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smin_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -4606,8 +4354,7 @@ define void @smin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smin_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -4630,8 +4377,7 @@ define void @smin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @smin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: smin_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vmin.vv v26, v26, v28
@@ -4640,8 +4386,7 @@ define void @smin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smin_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -4656,8 +4401,7 @@ define void @smin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smin_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -4680,8 +4424,7 @@ define void @smin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @smin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: smin_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vmin.vv v26, v26, v28
@@ -4690,8 +4433,7 @@ define void @smin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smin_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -4706,8 +4448,7 @@ define void @smin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smin_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -4740,8 +4481,7 @@ define void @smax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smax_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -4756,8 +4496,7 @@ define void @smax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smax_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -4780,8 +4519,7 @@ define void @smax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @smax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: smax_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vmax.vv v26, v26, v28
@@ -4790,8 +4528,7 @@ define void @smax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smax_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -4806,8 +4543,7 @@ define void @smax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smax_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -4830,8 +4566,7 @@ define void @smax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @smax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: smax_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vmax.vv v26, v26, v28
@@ -4840,8 +4575,7 @@ define void @smax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smax_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -4856,8 +4590,7 @@ define void @smax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smax_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -4880,8 +4613,7 @@ define void @smax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @smax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: smax_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vmax.vv v26, v26, v28
@@ -4890,8 +4622,7 @@ define void @smax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: smax_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -4906,8 +4637,7 @@ define void @smax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: smax_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -4940,8 +4670,7 @@ define void @umin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umin_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -4956,8 +4685,7 @@ define void @umin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umin_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -4980,8 +4708,7 @@ define void @umin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @umin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: umin_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vminu.vv v26, v26, v28
@@ -4990,8 +4717,7 @@ define void @umin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umin_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -5006,8 +4732,7 @@ define void @umin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umin_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -5030,8 +4755,7 @@ define void @umin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @umin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: umin_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vminu.vv v26, v26, v28
@@ -5040,8 +4764,7 @@ define void @umin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umin_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -5056,8 +4779,7 @@ define void @umin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umin_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -5080,8 +4802,7 @@ define void @umin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @umin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: umin_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vminu.vv v26, v26, v28
@@ -5090,8 +4811,7 @@ define void @umin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umin_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -5106,8 +4826,7 @@ define void @umin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umin_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -5140,8 +4859,7 @@ define void @umax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umax_v32i8:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v26, (a2)
@@ -5156,8 +4874,7 @@ define void @umax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umax_v32i8:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v26, (a2)
@@ -5180,8 +4897,7 @@ define void @umax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @umax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-LABEL: umax_v16i16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 16
-; LMULMAX2-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 16, e16,m2,ta,mu
 ; LMULMAX2-NEXT:    vle16.v v26, (a0)
 ; LMULMAX2-NEXT:    vle16.v v28, (a1)
 ; LMULMAX2-NEXT:    vmaxu.vv v26, v26, v28
@@ -5190,8 +4906,7 @@ define void @umax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umax_v16i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v26, (a2)
@@ -5206,8 +4921,7 @@ define void @umax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umax_v16i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v26, (a2)
@@ -5230,8 +4944,7 @@ define void @umax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 define void @umax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-LABEL: umax_v8i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 8
-; LMULMAX2-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 8, e32,m2,ta,mu
 ; LMULMAX2-NEXT:    vle32.v v26, (a0)
 ; LMULMAX2-NEXT:    vle32.v v28, (a1)
 ; LMULMAX2-NEXT:    vmaxu.vv v26, v26, v28
@@ -5240,8 +4953,7 @@ define void @umax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umax_v8i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
@@ -5256,8 +4968,7 @@ define void @umax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umax_v8i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v26, (a2)
@@ -5280,8 +4991,7 @@ define void @umax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 define void @umax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-LABEL: umax_v4i64:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 4
-; LMULMAX2-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT:    vsetivli a2, 4, e64,m2,ta,mu
 ; LMULMAX2-NEXT:    vle64.v v26, (a0)
 ; LMULMAX2-NEXT:    vle64.v v28, (a1)
 ; LMULMAX2-NEXT:    vmaxu.vv v26, v26, v28
@@ -5290,8 +5000,7 @@ define void @umax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV32-LABEL: umax_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a2, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v26, (a2)
@@ -5306,8 +5015,7 @@ define void @umax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ;
 ; LMULMAX1-RV64-LABEL: umax_v4i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a2, a1, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v26, (a2)
@@ -5330,8 +5038,7 @@ define void @umax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @add_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: add_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vadd.vi v25, v25, -1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5347,8 +5054,7 @@ define void @add_vi_v16i8(<16 x i8>* %x) {
 define void @add_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: add_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vadd.vi v25, v25, -1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5364,8 +5070,7 @@ define void @add_vi_v8i16(<8 x i16>* %x) {
 define void @add_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: add_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vadd.vi v25, v25, -1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5381,8 +5086,7 @@ define void @add_vi_v4i32(<4 x i32>* %x) {
 define void @add_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: add_iv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vadd.vi v25, v25, 1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5398,8 +5102,7 @@ define void @add_iv_v16i8(<16 x i8>* %x) {
 define void @add_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: add_iv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vadd.vi v25, v25, 1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5415,8 +5118,7 @@ define void @add_iv_v8i16(<8 x i16>* %x) {
 define void @add_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: add_iv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vadd.vi v25, v25, 1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5432,8 +5134,7 @@ define void @add_iv_v4i32(<4 x i32>* %x) {
 define void @add_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: add_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vadd.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5449,8 +5150,7 @@ define void @add_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @add_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: add_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vadd.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5466,8 +5166,7 @@ define void @add_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @add_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: add_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vadd.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5483,8 +5182,7 @@ define void @add_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @add_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: add_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vadd.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5500,8 +5198,7 @@ define void @add_xv_v16i8(<16 x i8>* %x, i8 %y) {
 define void @add_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: add_xv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vadd.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5517,8 +5214,7 @@ define void @add_xv_v8i16(<8 x i16>* %x, i16 %y) {
 define void @add_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: add_xv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vadd.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5534,8 +5230,7 @@ define void @add_xv_v4i32(<4 x i32>* %x, i32 %y) {
 define void @sub_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: sub_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    addi a1, zero, -1
 ; CHECK-NEXT:    vsub.vx v25, v25, a1
@@ -5552,8 +5247,7 @@ define void @sub_vi_v16i8(<16 x i8>* %x) {
 define void @sub_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: sub_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    addi a1, zero, -1
 ; CHECK-NEXT:    vsub.vx v25, v25, a1
@@ -5570,8 +5264,7 @@ define void @sub_vi_v8i16(<8 x i16>* %x) {
 define void @sub_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: sub_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    addi a1, zero, -1
 ; CHECK-NEXT:    vsub.vx v25, v25, a1
@@ -5588,8 +5281,7 @@ define void @sub_vi_v4i32(<4 x i32>* %x) {
 define void @sub_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: sub_iv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vrsub.vi v25, v25, 1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5605,8 +5297,7 @@ define void @sub_iv_v16i8(<16 x i8>* %x) {
 define void @sub_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: sub_iv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vrsub.vi v25, v25, 1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5622,8 +5313,7 @@ define void @sub_iv_v8i16(<8 x i16>* %x) {
 define void @sub_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: sub_iv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vrsub.vi v25, v25, 1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5639,8 +5329,7 @@ define void @sub_iv_v4i32(<4 x i32>* %x) {
 define void @sub_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: sub_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vsub.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5656,8 +5345,7 @@ define void @sub_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @sub_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: sub_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vsub.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5673,8 +5361,7 @@ define void @sub_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @sub_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: sub_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vsub.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5690,8 +5377,7 @@ define void @sub_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @sub_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: sub_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vrsub.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5707,8 +5393,7 @@ define void @sub_xv_v16i8(<16 x i8>* %x, i8 %y) {
 define void @sub_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: sub_xv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vrsub.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5724,8 +5409,7 @@ define void @sub_xv_v8i16(<8 x i16>* %x, i16 %y) {
 define void @sub_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: sub_xv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vrsub.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5741,8 +5425,7 @@ define void @sub_xv_v4i32(<4 x i32>* %x, i32 %y) {
 define void @mul_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: mul_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmul.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5758,8 +5441,7 @@ define void @mul_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @mul_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: mul_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmul.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5775,8 +5457,7 @@ define void @mul_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @mul_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: mul_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vmul.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5792,8 +5473,7 @@ define void @mul_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @mul_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: mul_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmul.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5809,8 +5489,7 @@ define void @mul_xv_v16i8(<16 x i8>* %x, i8 %y) {
 define void @mul_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: mul_xv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vmul.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5826,8 +5505,7 @@ define void @mul_xv_v8i16(<8 x i16>* %x, i16 %y) {
 define void @mul_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: mul_xv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vmul.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5843,8 +5521,7 @@ define void @mul_xv_v4i32(<4 x i32>* %x, i32 %y) {
 define void @and_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: and_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vand.vi v25, v25, -2
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5860,8 +5537,7 @@ define void @and_vi_v16i8(<16 x i8>* %x) {
 define void @and_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: and_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vand.vi v25, v25, -2
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5877,8 +5553,7 @@ define void @and_vi_v8i16(<8 x i16>* %x) {
 define void @and_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: and_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vand.vi v25, v25, -2
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5894,8 +5569,7 @@ define void @and_vi_v4i32(<4 x i32>* %x) {
 define void @and_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: and_iv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vand.vi v25, v25, 1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5911,8 +5585,7 @@ define void @and_iv_v16i8(<16 x i8>* %x) {
 define void @and_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: and_iv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vand.vi v25, v25, 1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5928,8 +5601,7 @@ define void @and_iv_v8i16(<8 x i16>* %x) {
 define void @and_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: and_iv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vand.vi v25, v25, 1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5945,8 +5617,7 @@ define void @and_iv_v4i32(<4 x i32>* %x) {
 define void @and_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: and_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vand.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -5962,8 +5633,7 @@ define void @and_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @and_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: and_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vand.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -5979,8 +5649,7 @@ define void @and_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @and_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: and_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vand.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -5996,8 +5665,7 @@ define void @and_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @and_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: and_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vand.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6013,8 +5681,7 @@ define void @and_xv_v16i8(<16 x i8>* %x, i8 %y) {
 define void @and_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: and_xv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vand.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6030,8 +5697,7 @@ define void @and_xv_v8i16(<8 x i16>* %x, i16 %y) {
 define void @and_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: and_xv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vand.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6047,8 +5713,7 @@ define void @and_xv_v4i32(<4 x i32>* %x, i32 %y) {
 define void @or_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: or_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vor.vi v25, v25, -2
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6064,8 +5729,7 @@ define void @or_vi_v16i8(<16 x i8>* %x) {
 define void @or_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: or_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vor.vi v25, v25, -2
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6081,8 +5745,7 @@ define void @or_vi_v8i16(<8 x i16>* %x) {
 define void @or_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: or_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vor.vi v25, v25, -2
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6098,8 +5761,7 @@ define void @or_vi_v4i32(<4 x i32>* %x) {
 define void @or_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: or_iv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vor.vi v25, v25, 1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6115,8 +5777,7 @@ define void @or_iv_v16i8(<16 x i8>* %x) {
 define void @or_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: or_iv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vor.vi v25, v25, 1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6132,8 +5793,7 @@ define void @or_iv_v8i16(<8 x i16>* %x) {
 define void @or_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: or_iv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vor.vi v25, v25, 1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6149,8 +5809,7 @@ define void @or_iv_v4i32(<4 x i32>* %x) {
 define void @or_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: or_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vor.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6166,8 +5825,7 @@ define void @or_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @or_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: or_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vor.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6183,8 +5841,7 @@ define void @or_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @or_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: or_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vor.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6200,8 +5857,7 @@ define void @or_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @or_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: or_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vor.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6217,8 +5873,7 @@ define void @or_xv_v16i8(<16 x i8>* %x, i8 %y) {
 define void @or_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: or_xv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vor.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6234,8 +5889,7 @@ define void @or_xv_v8i16(<8 x i16>* %x, i16 %y) {
 define void @or_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: or_xv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vor.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6251,8 +5905,7 @@ define void @or_xv_v4i32(<4 x i32>* %x, i32 %y) {
 define void @xor_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: xor_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vxor.vi v25, v25, -1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6268,8 +5921,7 @@ define void @xor_vi_v16i8(<16 x i8>* %x) {
 define void @xor_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: xor_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vxor.vi v25, v25, -1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6285,8 +5937,7 @@ define void @xor_vi_v8i16(<8 x i16>* %x) {
 define void @xor_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: xor_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vxor.vi v25, v25, -1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6302,8 +5953,7 @@ define void @xor_vi_v4i32(<4 x i32>* %x) {
 define void @xor_iv_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: xor_iv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vxor.vi v25, v25, 1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6319,8 +5969,7 @@ define void @xor_iv_v16i8(<16 x i8>* %x) {
 define void @xor_iv_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: xor_iv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vxor.vi v25, v25, 1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6336,8 +5985,7 @@ define void @xor_iv_v8i16(<8 x i16>* %x) {
 define void @xor_iv_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: xor_iv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vxor.vi v25, v25, 1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6353,8 +6001,7 @@ define void @xor_iv_v4i32(<4 x i32>* %x) {
 define void @xor_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: xor_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vxor.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6370,8 +6017,7 @@ define void @xor_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @xor_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: xor_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vxor.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6387,8 +6033,7 @@ define void @xor_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @xor_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: xor_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vxor.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6404,8 +6049,7 @@ define void @xor_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @xor_xv_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: xor_xv_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vxor.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6421,8 +6065,7 @@ define void @xor_xv_v16i8(<16 x i8>* %x, i8 %y) {
 define void @xor_xv_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: xor_xv_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vxor.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6438,8 +6081,7 @@ define void @xor_xv_v8i16(<8 x i16>* %x, i16 %y) {
 define void @xor_xv_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: xor_xv_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vxor.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6455,8 +6097,7 @@ define void @xor_xv_v4i32(<4 x i32>* %x, i32 %y) {
 define void @lshr_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: lshr_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vsrl.vi v25, v25, 7
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6472,8 +6113,7 @@ define void @lshr_vi_v16i8(<16 x i8>* %x) {
 define void @lshr_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: lshr_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vsrl.vi v25, v25, 15
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6489,8 +6129,7 @@ define void @lshr_vi_v8i16(<8 x i16>* %x) {
 define void @lshr_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: lshr_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vsrl.vi v25, v25, 31
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6506,8 +6145,7 @@ define void @lshr_vi_v4i32(<4 x i32>* %x) {
 define void @lshr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: lshr_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6523,8 +6161,7 @@ define void @lshr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @lshr_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: lshr_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6540,8 +6177,7 @@ define void @lshr_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @lshr_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: lshr_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6557,8 +6193,7 @@ define void @lshr_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @ashr_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: ashr_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vsra.vi v25, v25, 7
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6574,8 +6209,7 @@ define void @ashr_vi_v16i8(<16 x i8>* %x) {
 define void @ashr_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: ashr_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vsra.vi v25, v25, 15
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6591,8 +6225,7 @@ define void @ashr_vi_v8i16(<8 x i16>* %x) {
 define void @ashr_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: ashr_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vsra.vi v25, v25, 31
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6608,8 +6241,7 @@ define void @ashr_vi_v4i32(<4 x i32>* %x) {
 define void @ashr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: ashr_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vsra.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6625,8 +6257,7 @@ define void @ashr_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @ashr_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: ashr_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vsra.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6642,8 +6273,7 @@ define void @ashr_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @ashr_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: ashr_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vsra.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6659,8 +6289,7 @@ define void @ashr_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @shl_vi_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: shl_vi_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vsll.vi v25, v25, 7
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6676,8 +6305,7 @@ define void @shl_vi_v16i8(<16 x i8>* %x) {
 define void @shl_vi_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: shl_vi_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vsll.vi v25, v25, 15
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6693,8 +6321,7 @@ define void @shl_vi_v8i16(<8 x i16>* %x) {
 define void @shl_vi_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: shl_vi_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vsll.vi v25, v25, 31
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6710,8 +6337,7 @@ define void @shl_vi_v4i32(<4 x i32>* %x) {
 define void @shl_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: shl_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vsll.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6727,8 +6353,7 @@ define void @shl_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @shl_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: shl_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vsll.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6744,8 +6369,7 @@ define void @shl_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @shl_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: shl_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vsll.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6761,8 +6385,7 @@ define void @shl_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @sdiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: sdiv_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vdiv.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6778,8 +6401,7 @@ define void @sdiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @sdiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: sdiv_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vdiv.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6795,8 +6417,7 @@ define void @sdiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @sdiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: sdiv_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vdiv.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6812,8 +6433,7 @@ define void @sdiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @srem_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: srem_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vrem.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6829,8 +6449,7 @@ define void @srem_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @srem_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: srem_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vrem.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6846,8 +6465,7 @@ define void @srem_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @srem_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: srem_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vrem.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6863,8 +6481,7 @@ define void @srem_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @udiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: udiv_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vdivu.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6880,8 +6497,7 @@ define void @udiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @udiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: udiv_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vdivu.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6897,8 +6513,7 @@ define void @udiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @udiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: udiv_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vdivu.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6914,8 +6529,7 @@ define void @udiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @urem_vx_v16i8(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: urem_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vremu.vx v25, v25, a1
 ; CHECK-NEXT:    vse8.v v25, (a0)
@@ -6931,8 +6545,7 @@ define void @urem_vx_v16i8(<16 x i8>* %x, i8 %y) {
 define void @urem_vx_v8i16(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: urem_vx_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vremu.vx v25, v25, a1
 ; CHECK-NEXT:    vse16.v v25, (a0)
@@ -6948,8 +6561,7 @@ define void @urem_vx_v8i16(<8 x i16>* %x, i16 %y) {
 define void @urem_vx_v4i32(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: urem_vx_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a0)
 ; CHECK-NEXT:    vremu.vx v25, v25, a1
 ; CHECK-NEXT:    vse32.v v25, (a0)
@@ -6965,8 +6577,7 @@ define void @urem_vx_v4i32(<4 x i32>* %x, i32 %y) {
 define void @mulhu_vx_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: mulhu_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    addi a1, zero, 57
 ; CHECK-NEXT:    vmulhu.vx v25, v25, a1
@@ -6982,8 +6593,7 @@ define void @mulhu_vx_v16i8(<16 x i8>* %x) {
 define void @mulhu_vx_v8i16(<8 x i16>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhu_vx_v8i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    lui a1, 2
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, 1171
@@ -6997,8 +6607,7 @@ define void @mulhu_vx_v8i16(<8 x i16>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_vx_v8i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, 2
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, 1171
@@ -7018,8 +6627,7 @@ define void @mulhu_vx_v8i16(<8 x i16>* %x) {
 define void @mulhu_vx_v4i32(<4 x i32>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhu_vx_v4i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    lui a1, 838861
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, -819
@@ -7030,8 +6638,7 @@ define void @mulhu_vx_v4i32(<4 x i32>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_vx_v4i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, 838861
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, -819
@@ -7048,29 +6655,26 @@ define void @mulhu_vx_v4i32(<4 x i32>* %x) {
 define void @mulhu_vx_v2i64(<2 x i64>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhu_vx_v2i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI252_0)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI252_0)
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI252_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI252_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmulhu.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI252_1)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI252_1)
-; LMULMAX1-RV32-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI252_1)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI252_1)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vsrl.vv v25, v25, v26
 ; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: mulhu_vx_v2i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, 1026731
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, -1365
@@ -7093,8 +6697,7 @@ define void @mulhu_vx_v2i64(<2 x i64>* %x) {
 define void @mulhs_vx_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: mulhs_vx_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    addi a1, zero, -123
 ; CHECK-NEXT:    vmulhu.vx v25, v25, a1
@@ -7110,8 +6713,7 @@ define void @mulhs_vx_v16i8(<16 x i8>* %x) {
 define void @mulhs_vx_v8i16(<8 x i16>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhs_vx_v8i16:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    lui a1, 5
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, -1755
@@ -7124,8 +6726,7 @@ define void @mulhs_vx_v8i16(<8 x i16>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_vx_v8i16:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 8
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, 5
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, -1755
@@ -7144,8 +6745,7 @@ define void @mulhs_vx_v8i16(<8 x i16>* %x) {
 define void @mulhs_vx_v4i32(<4 x i32>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhs_vx_v4i32:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    lui a1, 629146
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, -1639
@@ -7158,8 +6758,7 @@ define void @mulhs_vx_v4i32(<4 x i32>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_vx_v4i32:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 4
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, 629146
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, -1639
@@ -7178,21 +6777,19 @@ define void @mulhs_vx_v4i32(<4 x i32>* %x) {
 define void @mulhs_vx_v2i64(<2 x i64>* %x) {
 ; LMULMAX1-RV32-LABEL: mulhs_vx_v2i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI256_0)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI256_0)
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 4
-; LMULMAX1-RV32-NEXT:    vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI256_0)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI256_0)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmulh.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT:    lui a2, %hi(.LCPI256_1)
-; LMULMAX1-RV32-NEXT:    addi a2, a2, %lo(.LCPI256_1)
-; LMULMAX1-RV32-NEXT:    vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT:    vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    lui a1, %hi(.LCPI256_1)
+; LMULMAX1-RV32-NEXT:    addi a1, a1, %lo(.LCPI256_1)
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vsrl.vv v26, v25, v26
 ; LMULMAX1-RV32-NEXT:    vadd.vv v25, v25, v26
 ; LMULMAX1-RV32-NEXT:    vse64.v v25, (a0)
@@ -7200,8 +6797,7 @@ define void @mulhs_vx_v2i64(<2 x i64>* %x) {
 ;
 ; LMULMAX1-RV64-LABEL: mulhs_vx_v2i64:
 ; LMULMAX1-RV64:       # %bb.0:
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 2
-; LMULMAX1-RV64-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    lui a1, 21845
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, 1365

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
index 3fbbcfe86bef..86aaaacb1ff7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
@@ -7,8 +7,7 @@
 define void @load_store_v1i1(<1 x i1>* %x, <1 x i1>* %y) {
 ; CHECK-LABEL: load_store_v1i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 1
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -20,8 +19,7 @@ define void @load_store_v1i1(<1 x i1>* %x, <1 x i1>* %y) {
 define void @load_store_v2i1(<2 x i1>* %x, <2 x i1>* %y) {
 ; CHECK-LABEL: load_store_v2i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 2
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -33,8 +31,7 @@ define void @load_store_v2i1(<2 x i1>* %x, <2 x i1>* %y) {
 define void @load_store_v4i1(<4 x i1>* %x, <4 x i1>* %y) {
 ; CHECK-LABEL: load_store_v4i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 4
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -46,8 +43,7 @@ define void @load_store_v4i1(<4 x i1>* %x, <4 x i1>* %y) {
 define void @load_store_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 ; CHECK-LABEL: load_store_v8i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
@@ -59,8 +55,7 @@ define void @load_store_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 define void @load_store_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 ; CHECK-LABEL: load_store_v16i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
index 89eeebf0aa2e..18e62d789656 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
@@ -7,8 +7,7 @@
 define void @and_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 ; CHECK-LABEL: and_v8i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vle1.v v26, (a1)
 ; CHECK-NEXT:    vmand.mm v25, v25, v26
@@ -24,8 +23,7 @@ define void @and_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 define void @or_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 ; CHECK-LABEL: or_v16i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vle1.v v26, (a1)
 ; CHECK-NEXT:    vmor.mm v25, v25, v26
@@ -74,8 +72,7 @@ define void @not_v64i1(<64 x i1>* %x, <64 x i1>* %y) {
 define void @andnot_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 ; CHECK-LABEL: andnot_v8i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vle1.v v26, (a1)
 ; CHECK-NEXT:    vmandnot.mm v25, v25, v26
@@ -92,8 +89,7 @@ define void @andnot_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 define void @ornot_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 ; CHECK-LABEL: ornot_v16i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vle1.v v26, (a1)
 ; CHECK-NEXT:    vmornot.mm v25, v25, v26
@@ -128,8 +124,7 @@ define void @xornot_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
 define void @nand_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 ; CHECK-LABEL: nand_v8i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 8
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vle1.v v26, (a1)
 ; CHECK-NEXT:    vmnand.mm v25, v25, v26
@@ -146,8 +141,7 @@ define void @nand_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
 define void @nor_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 ; CHECK-LABEL: nor_v16i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 16
-; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vle1.v v26, (a1)
 ; CHECK-NEXT:    vmnor.mm v25, v25, v26

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index 944f57bd9129..10a40f862429 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -7,8 +7,7 @@
 define void @splat_ones_v1i1(<1 x i1>* %x) {
 ; CHECK-LABEL: splat_ones_v1i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmset.m v25
 ; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -19,8 +18,7 @@ define void @splat_ones_v1i1(<1 x i1>* %x) {
 define void @splat_zeros_v2i1(<2 x i1>* %x) {
 ; CHECK-LABEL: splat_zeros_v2i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 2, e8,m1,ta,mu
 ; CHECK-NEXT:    vmclr.m v25
 ; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -31,8 +29,7 @@ define void @splat_zeros_v2i1(<2 x i1>* %x) {
 define void @splat_ones_v4i1(<4 x i1>* %x) {
 ; CHECK-LABEL: splat_ones_v4i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 4
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 4, e8,m1,ta,mu
 ; CHECK-NEXT:    vmset.m v25
 ; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -43,8 +40,7 @@ define void @splat_ones_v4i1(<4 x i1>* %x) {
 define void @splat_zeros_v8i1(<8 x i1>* %x) {
 ; CHECK-LABEL: splat_zeros_v8i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vmclr.m v25
 ; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -55,8 +51,7 @@ define void @splat_zeros_v8i1(<8 x i1>* %x) {
 define void @splat_ones_v16i1(<16 x i1>* %x) {
 ; CHECK-LABEL: splat_ones_v16i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmset.m v25
 ; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
@@ -76,8 +71,7 @@ define void @splat_zeros_v32i1(<32 x i1>* %x) {
 ; LMULMAX1-RV32-LABEL: splat_zeros_v32i1:
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmclr.m v25
 ; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    vse1.v v25, (a0)
@@ -86,8 +80,7 @@ define void @splat_zeros_v32i1(<32 x i1>* %x) {
 ; LMULMAX1-RV64-LABEL: splat_zeros_v32i1:
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmclr.m v25
 ; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    vse1.v v25, (a0)
@@ -110,8 +103,7 @@ define void @splat_ones_v64i1(<64 x i1>* %x) {
 ; LMULMAX1-RV32-LABEL: splat_ones_v64i1:
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 6
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV32-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmset.m v25
 ; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 4
@@ -124,8 +116,7 @@ define void @splat_ones_v64i1(<64 x i1>* %x) {
 ; LMULMAX1-RV64-LABEL: splat_ones_v64i1:
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 6
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 16
-; LMULMAX1-RV64-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmset.m v25
 ; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 4

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
index 473e7273aa3c..14b730f8a34e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -5,13 +5,12 @@
 define void @vselect_vv_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
 ; CHECK-LABEL: vselect_vv_v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a4, zero, 8
-; CHECK-NEXT:    vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a4, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli a0, a4, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -26,12 +25,11 @@ define void @vselect_vv_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x
 define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
 ; CHECK-LABEL: vselect_vx_v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a4, zero, 8
-; CHECK-NEXT:    vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a4, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, a4, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli a1, a4, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v26, v26, a0, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -47,12 +45,11 @@ define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %
 define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
 ; CHECK-LABEL: vselect_vi_v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v26, v26, -1, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -68,13 +65,12 @@ define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
 define void @vselect_vv_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
 ; CHECK-LABEL: vselect_vv_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a4, zero, 8
-; CHECK-NEXT:    vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a4, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli a0, a4, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -89,12 +85,11 @@ define void @vselect_vv_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %cc, <
 define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
 ; CHECK-LABEL: vselect_vx_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v26, v26, fa0, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -110,12 +105,11 @@ define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x flo
 define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
 ; CHECK-LABEL: vselect_vfpzero_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 8
-; CHECK-NEXT:    vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v26, v26, 0, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -131,13 +125,12 @@ define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>*
 define void @vselect_vv_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
 ; CHECK-LABEL: vselect_vv_v16i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a4, zero, 16
-; CHECK-NEXT:    vsetvli a5, a4, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a4, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli a0, a4, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse16.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -152,12 +145,11 @@ define void @vselect_vv_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %cc, <
 define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
 ; CHECK-LABEL: vselect_vx_v16i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a4, zero, 16
-; CHECK-NEXT:    vsetvli a5, a4, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a4, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, a4, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a2)
-; CHECK-NEXT:    vsetvli a1, a4, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v26, v26, a0, v0
 ; CHECK-NEXT:    vse16.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -173,12 +165,11 @@ define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <
 define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
 ; CHECK-LABEL: vselect_vi_v16i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 16
-; CHECK-NEXT:    vsetvli a4, a3, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a3, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
-; CHECK-NEXT:    vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a1)
-; CHECK-NEXT:    vsetvli a0, a3, e16,m2,ta,mu
+; CHECK-NEXT:    vsetivli a0, 16, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v26, v26, 4, v0
 ; CHECK-NEXT:    vse16.v v26, (a2)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
index c70f0d744419..f9adf118a467 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
@@ -1154,10 +1154,9 @@ define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vredsum.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1174,10 +1173,9 @@ define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vredmaxu.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1201,8 +1199,7 @@ define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vredmax.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 1
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
@@ -1220,10 +1217,9 @@ define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vredminu.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1248,8 +1244,7 @@ define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vor.vv v25, v25, v26
 ; CHECK-NEXT:    vredmin.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 1
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
@@ -1267,10 +1262,9 @@ define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v25, -1
 ; CHECK-NEXT:    vredand.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1287,10 +1281,9 @@ define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vredor.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1307,10 +1300,9 @@ define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vredxor.vs v25, v8, v25
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1329,10 +1321,9 @@ define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredsum.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1351,10 +1342,9 @@ define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredmaxu.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1380,8 +1370,7 @@ define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredmax.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 1
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
@@ -1401,10 +1390,9 @@ define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredminu.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1431,8 +1419,7 @@ define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredmin.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 1
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
@@ -1452,10 +1439,9 @@ define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredand.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1474,10 +1460,9 @@ define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredor.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1496,10 +1481,9 @@ define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vredxor.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1518,10 +1502,9 @@ define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredsum.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1540,10 +1523,9 @@ define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredmaxu.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1569,8 +1551,7 @@ define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredmax.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 1
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
@@ -1590,10 +1571,9 @@ define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredminu.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1620,8 +1600,7 @@ define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredmin.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a2, zero, 1
-; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
@@ -1641,10 +1620,9 @@ define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredand.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1663,10 +1641,9 @@ define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredor.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret
@@ -1685,10 +1662,9 @@ define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vredxor.vs v25, v8, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    addi a1, zero, 1
-; CHECK-NEXT:    addi a2, zero, 32
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsrl.vx v25, v25, a2
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT:    vsrl.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.x.s a1, v25
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
index 925454b391e7..dcfc769eafd2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
@@ -11,11 +11,10 @@ declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i
 define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(i16* %base, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,tu,mu
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
@@ -33,11 +32,10 @@ declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16
 define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mv a2, zero
-; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vsetivli a2, 0, e16,m4,tu,mu
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
@@ -54,11 +52,10 @@ declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv1
 define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
@@ -76,11 +73,10 @@ declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv1
 define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
@@ -98,8 +94,7 @@ declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask
 define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(i16* %base, i64* %outvl) {
 ; CHECK-LABEL: test_vlseg2ff_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mv a2, zero
-; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a1)
@@ -117,8 +112,7 @@ define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val
 ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv4r.v v4, v8
-; CHECK-NEXT:    mv a2, zero
-; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vsetivli a2, 0, e16,m4,tu,mu
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a1)
@@ -140,8 +134,7 @@ define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
 ; CHECK-NEXT:    ret
 entry:
@@ -154,8 +147,7 @@ define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vs
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -171,8 +163,7 @@ define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %of
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a2, zero
-; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
 entry:
@@ -185,8 +176,7 @@ define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i6
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a2, zero
-; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a2, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -203,8 +193,7 @@ define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %bas
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v28, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v28
 ; CHECK-NEXT:    ret
 entry:
@@ -218,8 +207,7 @@ define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16*
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v28, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v28, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -236,8 +224,7 @@ define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %bas
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v28, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28
 ; CHECK-NEXT:    ret
 entry:
@@ -251,8 +238,7 @@ define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16*
 ; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v28, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    mv a1, zero
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetivli a1, 0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28, v0.t
 ; CHECK-NEXT:    ret
 entry: