[llvm] [RISCV][VSETVLI] Prefer VTYPE for immediate known to be less than VLMAX (PR #69759)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 20 12:37:31 PDT 2023
https://github.com/preames created https://github.com/llvm/llvm-project/pull/69759
If we have a vsetvli that toggles from a state with a constant AVL to a state with the same constant AVL, then the vsetvli can use the x0, x0 VL-preserving form, provided that the shared AVL is less than or equal to the minimum VLMAX of the two states (i.e. AVL=VL for both states).
VTYPE-only toggles are generally cheaper, and these patterns show up a lot with mixed-width arithmetic and with large types that have been legalized via splitting.
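To make the transform concrete, here is a hand-written before/after sketch modeled on one of the affected check patterns in the diff below (the LMULMAX1 lines in sextload_v16i16_v16i32), assuming the minimum VLEN of 128 implied by +v, so VLMAX is 8 for e16/m1 and 4 for e32/m1, both at least the shared AVL of 4:

    ; before: both states use the same constant AVL of 4, and 4 <= VLMAX in each
    vsetivli zero, 4, e16, m1, ta, ma
    vslidedown.vi v8, v10, 4
    vsetivli zero, 4, e32, m1, ta, ma
    vsext.vf2 v9, v8

    ; after: VL is known to remain 4, so the second vsetvli becomes a VTYPE-only toggle
    vsetivli zero, 4, e16, m1, ta, ma
    vslidedown.vi v8, v10, 4
    vsetvli zero, zero, e32, m1, ta, ma
    vsext.vf2 v9, v8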
Meta comments for the review:
* I noticed this opportunity in the delta from https://github.com/llvm/llvm-project/pull/69259. I honestly feel quite silly for never noticing it before, as it turned out to be both fairly trivial to implement and quite widespread.
* There are two refactorings in the current patch - one is just code motion and a rename, the other adds the subtarget pointer to the pass. Happy to separate these out and land them first if reviewers find it helpful.
>From 622682a707bc53b5ee411b8d6778b77299832749 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 20 Oct 2023 12:26:59 -0700
Subject: [PATCH] [RISCV][VSETVLI] Prefer VTYPE for immediate known to be less
than VLMAX
If we have a vsetvli that toggles from a state with a constant AVL to a
state with the same constant AVL, then the vsetvli can use the x0, x0
VL-preserving form, provided that the shared AVL is less than or equal
to the minimum VLMAX of the two states (i.e. AVL=VL for both states).
VTYPE-only toggles are generally cheaper, and these patterns show up a
lot with mixed-width arithmetic and with large types that have been
legalized via splitting.
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 67 ++---
.../rvv/fixed-vectors-extload-truncstore.ll | 200 +++++++-------
.../rvv/fixed-vectors-extract-subvector.ll | 42 +--
.../RISCV/rvv/fixed-vectors-fp-conv.ll | 10 +-
.../RISCV/rvv/fixed-vectors-fp-interleave.ll | 8 +-
.../CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll | 16 +-
.../CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll | 28 +-
.../fixed-vectors-insert-subvector-shuffle.ll | 8 +-
.../rvv/fixed-vectors-insert-subvector.ll | 12 +-
.../rvv/fixed-vectors-int-explodevector.ll | 10 +-
.../RISCV/rvv/fixed-vectors-int-exttrunc.ll | 24 +-
.../RISCV/rvv/fixed-vectors-int-interleave.ll | 14 +-
.../CodeGen/RISCV/rvv/fixed-vectors-int.ll | 16 +-
.../rvv/fixed-vectors-interleave-store.ll | 2 +-
.../rvv/fixed-vectors-interleaved-access.ll | 12 +-
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 4 +-
.../RISCV/rvv/fixed-vectors-masked-scatter.ll | 244 +++++++++---------
.../RISCV/rvv/fixed-vectors-reduction-fp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-reduction-int.ll | 16 +-
.../rvv/fixed-vectors-strided-load-combine.ll | 2 +-
.../RISCV/rvv/fixed-vectors-unaligned.ll | 4 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll | 4 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll | 6 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll | 6 +-
.../RISCV/rvv/fixed-vectors-vpgather.ll | 24 +-
.../RISCV/rvv/fixed-vectors-vpscatter.ll | 10 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vwmulsu.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll | 2 +-
.../RISCV/rvv/vector-interleave-fixed.ll | 4 +-
.../RISCV/rvv/vsetvli-valid-elen-fp.ll | 2 +-
.../CodeGen/RISCV/srem-seteq-illegal-types.ll | 6 +-
36 files changed, 412 insertions(+), 405 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 4c99da1244bf50c..bf7dd2359d9b458 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -720,6 +720,7 @@ struct BlockData {
};
class RISCVInsertVSETVLI : public MachineFunctionPass {
+ const RISCVSubtarget *ST;
const TargetInstrInfo *TII;
MachineRegisterInfo *MRI;
@@ -860,6 +861,28 @@ static VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) {
return NewInfo;
}
+/// Return true if the VL value configured must be equal to the requested one.
+static bool willVLBeAVL(const VSETVLIInfo &Info, const RISCVSubtarget &ST) {
+ if (!Info.hasAVLImm())
+ // VLMAX is always the same value.
+ // TODO: Could extend to other registers by looking at the associated vreg
+ // def placement.
+ return RISCV::X0 == Info.getAVLReg();
+
+ unsigned AVL = Info.getAVLImm();
+ unsigned SEW = Info.getSEW();
+ unsigned AVLInBits = AVL * SEW;
+
+ unsigned LMul;
+ bool Fractional;
+ std::tie(LMul, Fractional) = RISCVVType::decodeVLMUL(Info.getVLMUL());
+
+ if (Fractional)
+ return ST.getRealMinVLen() / LMul >= AVLInBits;
+ return ST.getRealMinVLen() * LMul >= AVLInBits;
+}
+
+
/// Return true if a vsetvli instruction to change from PrevInfo
/// to Info might change the VL register. If this returns false,
/// the vsetvli can use the X0, X0 form.
@@ -885,6 +908,15 @@ bool RISCVInsertVSETVLI::mayChangeVL(const VSETVLIInfo &Info,
return false;
}
}
+
+ // For constant AVL values less than VLMAX, we know that VL=AVL and thus
+ // if the two AVLs are the same, we know the VLs must also be. As such,
+ // this vsetvli is not changing VL.
+ if (Info.hasAVLImm() && PrevInfo.hasAVLImm() &&
+ Info.getAVLImm() == PrevInfo.getAVLImm() &&
+ willVLBeAVL(Info, *ST) && willVLBeAVL(PrevInfo, *ST))
+ return false;
+
return true;
}
@@ -966,8 +998,7 @@ bool RISCVInsertVSETVLI::needVSETVLI(const MachineInstr &MI,
return true;
DemandedFields Used = getDemanded(MI, MRI);
- bool HasVInstructionsF64 =
- MI.getMF()->getSubtarget<RISCVSubtarget>().hasVInstructionsF64();
+ bool HasVInstructionsF64 = ST->hasVInstructionsF64();
// A slidedown/slideup with an *undefined* merge op can freely clobber
// elements not copied from the source vector (e.g. masked off, tail, or
@@ -1307,36 +1338,12 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
}
}
-/// Return true if the VL value configured must be equal to the requested one.
-static bool hasFixedResult(const VSETVLIInfo &Info, const RISCVSubtarget &ST) {
- if (!Info.hasAVLImm())
- // VLMAX is always the same value.
- // TODO: Could extend to other registers by looking at the associated vreg
- // def placement.
- return RISCV::X0 == Info.getAVLReg();
-
- unsigned AVL = Info.getAVLImm();
- unsigned SEW = Info.getSEW();
- unsigned AVLInBits = AVL * SEW;
-
- unsigned LMul;
- bool Fractional;
- std::tie(LMul, Fractional) = RISCVVType::decodeVLMUL(Info.getVLMUL());
-
- if (Fractional)
- return ST.getRealMinVLen() / LMul >= AVLInBits;
- return ST.getRealMinVLen() * LMul >= AVLInBits;
-}
-
/// Perform simple partial redundancy elimination of the VSETVLI instructions
/// we're about to insert by looking for cases where we can PRE from the
/// beginning of one block to the end of one of its predecessors. Specifically,
/// this is geared to catch the common case of a fixed length vsetvl in a single
/// block loop when it could execute once in the preheader instead.
void RISCVInsertVSETVLI::doPRE(MachineBasicBlock &MBB) {
- const MachineFunction &MF = *MBB.getParent();
- const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
-
if (!BlockInfo[MBB.getNumber()].Pred.isUnknown())
return;
@@ -1365,7 +1372,7 @@ void RISCVInsertVSETVLI::doPRE(MachineBasicBlock &MBB) {
return;
// If VL can be less than AVL, then we can't reduce the frequency of exec.
- if (!hasFixedResult(AvailableInfo, ST))
+ if (!willVLBeAVL(AvailableInfo, *ST))
return;
// Model the effect of changing the input state of the block MBB to
@@ -1534,13 +1541,13 @@ void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
// Skip if the vector extension is not enabled.
- const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
- if (!ST.hasVInstructions())
+ ST = &MF.getSubtarget<RISCVSubtarget>();
+ if (!ST->hasVInstructions())
return false;
LLVM_DEBUG(dbgs() << "Entering InsertVSETVLI for " << MF.getName() << "\n");
- TII = ST.getInstrInfo();
+ TII = ST->getInstrInfo();
MRI = &MF.getRegInfo();
assert(BlockInfo.empty() && "Expect empty block infos");
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
index 4aaefb24d5aa279..16ef2bdedae7745 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -144,7 +144,7 @@ define <4 x i64> @sextload_v4i8_v4i64(ptr %x) {
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v9, v8
; LMULMAX1-NEXT: vsext.vf8 v8, v10
; LMULMAX1-NEXT: ret
@@ -167,7 +167,7 @@ define <4 x i64> @zextload_v4i8_v4i64(ptr %x) {
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v9, v8
; LMULMAX1-NEXT: vzext.vf8 v8, v10
; LMULMAX1-NEXT: ret
@@ -214,7 +214,7 @@ define <8 x i32> @sextload_v8i8_v8i32(ptr %x) {
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v10
; LMULMAX1-NEXT: ret
@@ -237,7 +237,7 @@ define <8 x i32> @zextload_v8i8_v8i32(ptr %x) {
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v8
; LMULMAX1-NEXT: vzext.vf4 v8, v10
; LMULMAX1-NEXT: ret
@@ -264,13 +264,13 @@ define <8 x i64> @sextload_v8i8_v8i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v9, v12
; LMULMAX1-NEXT: ret
;
@@ -296,13 +296,13 @@ define <8 x i64> @zextload_v8i8_v8i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v9, v12
; LMULMAX1-NEXT: ret
;
@@ -324,7 +324,7 @@ define <16 x i16> @sextload_v16i8_v16i16(ptr %x) {
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: ret
@@ -347,7 +347,7 @@ define <16 x i16> @zextload_v16i8_v16i16(ptr %x) {
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: ret
@@ -374,13 +374,13 @@ define <16 x i32> @sextload_v16i8_v16i32(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v12
; LMULMAX1-NEXT: ret
;
@@ -406,13 +406,13 @@ define <16 x i32> @zextload_v16i8_v16i32(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v12
; LMULMAX1-NEXT: ret
;
@@ -438,29 +438,29 @@ define <16 x i64> @sextload_v16i8_v16i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v10, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v12, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v13, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v9, v13
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v14, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v13, v14
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v11, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v14, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v15, v11
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v16, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v11, v16
; LMULMAX1-NEXT: ret
;
@@ -470,7 +470,7 @@ define <16 x i64> @sextload_v16i8_v16i64(ptr %x) {
; LMULMAX4-NEXT: vle8.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; LMULMAX4-NEXT: vsext.vf8 v12, v8
; LMULMAX4-NEXT: vsext.vf8 v8, v16
; LMULMAX4-NEXT: ret
@@ -490,29 +490,29 @@ define <16 x i64> @zextload_v16i8_v16i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v10, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v12, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v13, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v9, v13
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v14, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v13, v14
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v11, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v14, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v15, v11
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v16, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v11, v16
; LMULMAX1-NEXT: ret
;
@@ -522,7 +522,7 @@ define <16 x i64> @zextload_v16i8_v16i64(ptr %x) {
; LMULMAX4-NEXT: vle8.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; LMULMAX4-NEXT: vzext.vf8 v12, v8
; LMULMAX4-NEXT: vzext.vf8 v8, v16
; LMULMAX4-NEXT: ret
@@ -655,7 +655,7 @@ define <4 x i64> @sextload_v4i16_v4i64(ptr %x) {
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v10
; LMULMAX1-NEXT: ret
@@ -678,7 +678,7 @@ define <4 x i64> @zextload_v4i16_v4i64(ptr %x) {
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v8
; LMULMAX1-NEXT: vzext.vf4 v8, v10
; LMULMAX1-NEXT: ret
@@ -713,7 +713,7 @@ define <8 x i32> @sextload_v8i16_v8i32(ptr %x) {
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: ret
@@ -736,7 +736,7 @@ define <8 x i32> @zextload_v8i16_v8i32(ptr %x) {
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: ret
@@ -763,13 +763,13 @@ define <8 x i64> @sextload_v8i16_v8i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v12
; LMULMAX1-NEXT: ret
;
@@ -795,13 +795,13 @@ define <8 x i64> @zextload_v8i16_v8i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v12
; LMULMAX1-NEXT: ret
;
@@ -847,12 +847,12 @@ define <16 x i32> @sextload_v16i16_v16i32(ptr %x) {
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v12, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v11, v10
; LMULMAX1-NEXT: vsext.vf2 v10, v12
; LMULMAX1-NEXT: ret
@@ -877,12 +877,12 @@ define <16 x i32> @zextload_v16i16_v16i32(ptr %x) {
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v12, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v11, v10
; LMULMAX1-NEXT: vzext.vf2 v10, v12
; LMULMAX1-NEXT: ret
@@ -911,26 +911,26 @@ define <16 x i64> @sextload_v16i16_v16i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v12
; LMULMAX1-NEXT: vsext.vf4 v12, v13
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v15, v13, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v14, v15
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v16, v15, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v15, v16
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v16, v13, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v13, v16
; LMULMAX1-NEXT: ret
;
@@ -940,7 +940,7 @@ define <16 x i64> @sextload_v16i16_v16i64(ptr %x) {
; LMULMAX4-NEXT: vle16.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; LMULMAX4-NEXT: vsext.vf4 v12, v8
; LMULMAX4-NEXT: vsext.vf4 v8, v16
; LMULMAX4-NEXT: ret
@@ -962,26 +962,26 @@ define <16 x i64> @zextload_v16i16_v16i64(ptr %x) {
; LMULMAX1-NEXT: vslidedown.vi v11, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v10, v11
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v11, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v12
; LMULMAX1-NEXT: vzext.vf4 v12, v13
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v15, v13, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v14, v15
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v16, v15, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v15, v16
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v16, v13, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v13, v16
; LMULMAX1-NEXT: ret
;
@@ -991,7 +991,7 @@ define <16 x i64> @zextload_v16i16_v16i64(ptr %x) {
; LMULMAX4-NEXT: vle16.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; LMULMAX4-NEXT: vzext.vf4 v12, v8
; LMULMAX4-NEXT: vzext.vf4 v8, v16
; LMULMAX4-NEXT: ret
@@ -1083,7 +1083,7 @@ define <4 x i64> @sextload_v4i32_v4i64(ptr %x) {
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: ret
@@ -1106,7 +1106,7 @@ define <4 x i64> @zextload_v4i32_v4i64(ptr %x) {
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: ret
@@ -1182,12 +1182,12 @@ define <8 x i64> @sextload_v8i32_v8i64(ptr %x) {
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v11, v10
; LMULMAX1-NEXT: vsext.vf2 v10, v12
; LMULMAX1-NEXT: ret
@@ -1212,12 +1212,12 @@ define <8 x i64> @zextload_v8i32_v8i64(ptr %x) {
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v11, v10
; LMULMAX1-NEXT: vzext.vf2 v10, v12
; LMULMAX1-NEXT: ret
@@ -1316,20 +1316,20 @@ define <16 x i64> @sextload_v16i32_v16i64(ptr %x) {
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v14, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v13, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v15, v10
; LMULMAX1-NEXT: vsext.vf2 v10, v12
; LMULMAX1-NEXT: vsext.vf2 v12, v14
@@ -1342,7 +1342,7 @@ define <16 x i64> @sextload_v16i32_v16i64(ptr %x) {
; LMULMAX4-NEXT: vle32.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; LMULMAX4-NEXT: vsext.vf2 v12, v8
; LMULMAX4-NEXT: vsext.vf2 v8, v16
; LMULMAX4-NEXT: ret
@@ -1364,20 +1364,20 @@ define <16 x i64> @zextload_v16i32_v16i64(ptr %x) {
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v14, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v13, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v15, v10
; LMULMAX1-NEXT: vzext.vf2 v10, v12
; LMULMAX1-NEXT: vzext.vf2 v12, v14
@@ -1390,7 +1390,7 @@ define <16 x i64> @zextload_v16i32_v16i64(ptr %x) {
; LMULMAX4-NEXT: vle32.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
-; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; LMULMAX4-NEXT: vzext.vf2 v12, v8
; LMULMAX4-NEXT: vzext.vf2 v8, v16
; LMULMAX4-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index b4260b04604cd06..89e04eaf9e373b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -23,7 +23,7 @@ define void @extract_v2i8_v4i8_2(ptr %x, ptr %y) {
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i8>, ptr %x
@@ -53,7 +53,7 @@ define void @extract_v2i8_v8i8_6(ptr %x, ptr %y) {
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i8>, ptr %x
@@ -69,7 +69,7 @@ define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
@@ -85,7 +85,7 @@ define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 5
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
@@ -115,7 +115,7 @@ define void @extract_v2i32_v8i32_2(ptr %x, ptr %y) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
@@ -131,7 +131,7 @@ define void @extract_v2i32_v8i32_4(ptr %x, ptr %y) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
@@ -147,7 +147,7 @@ define void @extract_v2i32_v8i32_6(ptr %x, ptr %y) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i32>, ptr %x
@@ -173,7 +173,7 @@ define void @extract_v2i32_nxv16i32_2(<vscale x 16 x i32> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 2)
@@ -186,7 +186,7 @@ define void @extract_v2i32_nxv16i32_4(<vscale x 16 x i32> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 4)
@@ -199,7 +199,7 @@ define void @extract_v2i32_nxv16i32_6(<vscale x 16 x i32> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 6)
@@ -212,7 +212,7 @@ define void @extract_v2i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 8
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
@@ -236,7 +236,7 @@ define void @extract_v2i8_nxv2i8_2(<vscale x 2 x i8> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 2)
@@ -249,7 +249,7 @@ define void @extract_v2i8_nxv2i8_4(<vscale x 2 x i8> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 4)
@@ -262,7 +262,7 @@ define void @extract_v2i8_nxv2i8_6(<vscale x 2 x i8> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 6
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 6)
@@ -275,7 +275,7 @@ define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 8
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%c = call <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
@@ -439,7 +439,7 @@ define void @extract_v2i1_v64i1_2(ptr %x, ptr %y) {
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
@@ -468,7 +468,7 @@ define void @extract_v2i1_v64i1_42(ptr %x, ptr %y) {
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
@@ -513,7 +513,7 @@ define void @extract_v2i1_nxv2i1_2(<vscale x 2 x i1> %x, ptr %y) {
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
@@ -557,7 +557,7 @@ define void @extract_v2i1_nxv64i1_2(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
@@ -583,7 +583,7 @@ define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-NEXT: li a1, 42
; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
@@ -608,7 +608,7 @@ define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, ptr %y) {
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 26
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
index 88a86bbdab9cd6b..95c8c4a95810cf4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
@@ -49,9 +49,9 @@ define void @fpext_v8f16_v8f32(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vle16.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.f.f.v v9, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v8
; LMULMAX1-NEXT: addi a0, a1, 16
; LMULMAX1-NEXT: vse32.v v10, (a0)
@@ -80,7 +80,7 @@ define void @fpext_v8f16_v8f64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vle16.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.f.f.v v9, v10
@@ -94,9 +94,9 @@ define void @fpext_v8f16_v8f64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.f.f.v v12, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.f.f.v v8, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index ea818df7329c7d1..2da24cccadd49f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -291,7 +291,7 @@ define <4 x half> @unary_interleave_v4f16(<4 x half> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 2
-; V128-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; V128-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -316,7 +316,7 @@ define <4 x float> @unary_interleave_v4f32(<4 x float> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 2
-; V128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; V128-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -383,7 +383,7 @@ define <8 x half> @unary_interleave_v8f16(<8 x half> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 4
-; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; V128-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -408,7 +408,7 @@ define <8 x float> @unary_interleave_v8f32(<8 x float> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; V128-NEXT: vslidedown.vi v12, v8, 4
-; V128-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; V128-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; V128-NEXT: vwaddu.vv v10, v12, v8
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v10, a0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 44b96d076df4552..037b78b31e39821 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -498,13 +498,13 @@ define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v10, v8
; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v11, v9
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v12, v8
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v9, v8
; LMULMAX1-NEXT: addi a0, a1, 16
; LMULMAX1-NEXT: vse64.v v9, (a0)
@@ -538,13 +538,13 @@ define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v10, v8
; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v11, v9
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v12, v8
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v9, v8
; LMULMAX1-NEXT: addi a0, a1, 16
; LMULMAX1-NEXT: vse64.v v9, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index 1952789b4073344..64331acc9c04b46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -430,7 +430,7 @@ define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v10, v10, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v10, v9, -1, v0
@@ -460,7 +460,7 @@ define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v10, v10, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0
@@ -516,7 +516,7 @@ define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vle16.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v10, v9
; LMULMAX1-NEXT: vfwcvt.f.x.v v9, v10
; LMULMAX1-NEXT: vsext.vf2 v10, v8
@@ -526,9 +526,9 @@ define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v10, v8
; LMULMAX1-NEXT: vfwcvt.f.x.v v12, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v10, v8
; LMULMAX1-NEXT: vfwcvt.f.x.v v8, v10
; LMULMAX1-NEXT: addi a0, a1, 48
@@ -561,7 +561,7 @@ define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vle16.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v10, v9
; LMULMAX1-NEXT: vfwcvt.f.xu.v v9, v10
; LMULMAX1-NEXT: vzext.vf2 v10, v8
@@ -571,9 +571,9 @@ define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v10, v8
; LMULMAX1-NEXT: vfwcvt.f.xu.v v12, v10
-; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v10, v8
; LMULMAX1-NEXT: vfwcvt.f.xu.v v8, v10
; LMULMAX1-NEXT: addi a0, a1, 48
@@ -611,7 +611,7 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v9, v12, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v9, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v13, v11, -1, v0
@@ -622,7 +622,7 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v10, v13, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v13, v11, -1, v0
@@ -631,7 +631,7 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v12, v12, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v12, v11, -1, v0
@@ -662,7 +662,7 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v9, v12, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v9, v9, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v13, v11, 1, v0
@@ -673,7 +673,7 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v10, v13, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v13, v11, 1, v0
@@ -682,7 +682,7 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
; LMULMAX1-NEXT: vmerge.vim v12, v12, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v12, v12, 2
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vmerge.vim v12, v11, 1, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll
index 64e1bf3e6c0324b..7ddc38b6818e9ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll
@@ -82,7 +82,7 @@ define <4 x i32> @insert_subvector_load_v4i32_v2i32(<4 x i32> %v1, ptr %p) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%v2 = load <2 x i32>, ptr %p
@@ -97,7 +97,7 @@ define <4 x i32> @insert_subvector_vp_load_v4i32_v2i32(<4 x i32> %v1, ptr %p, <2
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v9, (a0), v0.t
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%v2 = call <2 x i32> @llvm.vp.load.v2i32(ptr %p, <2 x i1> %mask, i32 2)
@@ -112,7 +112,7 @@ define <4 x i32> @insert_subvector_add_v4i32_v2i32(<4 x i32> %v1, <2 x i32> %v2)
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vadd.vv v9, v9, v10
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%v3 = add <2 x i32> %v2, <i32 0, i32 1>
@@ -127,7 +127,7 @@ define <4 x i32> @insert_subvector_vp_add_v4i32_v2i32(<4 x i32> %v1, <2 x i32> %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vi v9, v9, 1, v0.t
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%v3 = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %v2, <2 x i32> <i32 1, i32 1>, <2 x i1> %mask, i32 2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index a77c49c942561b6..28bdf0651e3866c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -14,7 +14,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, ptr %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m4, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%sv = load <2 x i32>, ptr %svp
@@ -53,7 +53,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-NEXT: vle32.v v12, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, ma
+; LMULMAX2-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; LMULMAX2-NEXT: vmv.v.v v8, v12
; LMULMAX2-NEXT: ret
;
@@ -63,7 +63,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v16, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; LMULMAX1-NEXT: vmv.v.v v8, v12
; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v16, 4
@@ -279,7 +279,7 @@ define void @insert_v4i16_v2i16_0(ptr %vp, ptr %svp) {
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vse16.v v8, (a0)
@@ -388,7 +388,7 @@ define void @insert_v8i1_v4i1_0(ptr %vp, ptr %svp) {
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, ma
; CHECK-NEXT: vmv.v.v v9, v8
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v8, v9, 0
@@ -432,7 +432,7 @@ define <vscale x 2 x i16> @insert_nxv2i16_v2i16_0(<vscale x 2 x i16> %v, ptr %sv
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%sv = load <2 x i16>, ptr %svp
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
index f3570495600f3c3..46852c21e6f8587 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
@@ -273,7 +273,7 @@ define i16 @explode_16xi16(<16 x i16> %v) {
; CHECK-NEXT: vmv.x.s a4, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 7
; CHECK-NEXT: vmv.x.s a5, v10
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 8
; CHECK-NEXT: vmv.x.s a6, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 9
@@ -406,7 +406,7 @@ define i32 @explode_8xi32(<8 x i32> %v) {
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: vslidedown.vi v10, v8, 3
; RV32-NEXT: vmv.x.s a1, v10
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: vmv.x.s a2, v10
; RV32-NEXT: vslidedown.vi v10, v8, 5
@@ -434,7 +434,7 @@ define i32 @explode_8xi32(<8 x i32> %v) {
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: vslidedown.vi v10, v8, 3
; RV64-NEXT: vmv.x.s a1, v10
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 4
; RV64-NEXT: vmv.x.s a2, v10
; RV64-NEXT: vslidedown.vi v10, v8, 5
@@ -489,7 +489,7 @@ define i32 @explode_16xi32(<16 x i32> %v) {
; RV32-NEXT: vmv.x.s a0, v12
; RV32-NEXT: vslidedown.vi v12, v8, 3
; RV32-NEXT: vmv.x.s a1, v12
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vmv.x.s a2, v12
; RV32-NEXT: vslidedown.vi v12, v8, 5
@@ -549,7 +549,7 @@ define i32 @explode_16xi32(<16 x i32> %v) {
; RV64-NEXT: vmv.x.s a0, v12
; RV64-NEXT: vslidedown.vi v12, v8, 3
; RV64-NEXT: vmv.x.s a1, v12
-; RV64-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 4
; RV64-NEXT: vmv.x.s a2, v12
; RV64-NEXT: vslidedown.vi v12, v8, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index 2c3bc2ef4fe5644..3130a2fefcddb54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -57,9 +57,9 @@ define void @sext_v8i8_v8i32(ptr %x, ptr %z) {
; LMULMAX1-NEXT: vle8.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v10, v8
; LMULMAX1-NEXT: addi a0, a1, 16
; LMULMAX1-NEXT: vse32.v v10, (a0)
@@ -88,16 +88,16 @@ define void @sext_v32i8_v32i32(ptr %x, ptr %z) {
; LMULMAX2-NEXT: vle8.v v8, (a0)
; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX2-NEXT: vslidedown.vi v10, v8, 8
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; LMULMAX2-NEXT: vsext.vf4 v12, v10
; LMULMAX2-NEXT: vsext.vf4 v10, v8
; LMULMAX2-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; LMULMAX2-NEXT: vslidedown.vi v8, v8, 16
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-NEXT: vsext.vf4 v14, v8
-; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, ma
+; LMULMAX2-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; LMULMAX2-NEXT: vslidedown.vi v8, v8, 8
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; LMULMAX2-NEXT: vsext.vf4 v16, v8
; LMULMAX2-NEXT: addi a0, a1, 96
; LMULMAX2-NEXT: vse32.v v16, (a0)
@@ -116,29 +116,29 @@ define void @sext_v32i8_v32i32(ptr %x, ptr %z) {
; LMULMAX1-NEXT: vle8.v v9, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v11, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v12, v10
; LMULMAX1-NEXT: vsext.vf4 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v13, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v14, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v9, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v15, v9
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v9, v9, 4
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v16, v9
; LMULMAX1-NEXT: addi a0, a1, 48
; LMULMAX1-NEXT: vse32.v v16, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index a54fa2e9b765fef..95bc10af1e6e5f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -164,7 +164,7 @@ define <4 x i32> @interleave_v4i32_offset_2(<4 x i32> %x, <4 x i32> %y) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; V128-NEXT: vslidedown.vi v10, v9, 2
-; V128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; V128-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -450,7 +450,7 @@ define <4 x i8> @unary_interleave_v4i8(<4 x i8> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 2
-; V128-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; V128-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -502,7 +502,7 @@ define <4 x i16> @unary_interleave_v4i16(<4 x i16> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 2
-; V128-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; V128-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -527,7 +527,7 @@ define <4 x i32> @unary_interleave_v4i32(<4 x i32> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 2
-; V128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; V128-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -594,7 +594,7 @@ define <8 x i8> @unary_interleave_v8i8(<8 x i8> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 4
-; V128-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; V128-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
@@ -619,7 +619,7 @@ define <8 x i16> @unary_interleave_v8i16(<8 x i16> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 4
-; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; V128-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; V128-NEXT: vwaddu.vv v9, v10, v8
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v8
@@ -644,7 +644,7 @@ define <8 x i32> @unary_interleave_v8i32(<8 x i32> %x) {
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; V128-NEXT: vslidedown.vi v12, v8, 4
-; V128-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; V128-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; V128-NEXT: vwaddu.vv v10, v8, v12
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v10, a0, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index e6868abdb5b1d71..cf8d800dc208583 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -783,7 +783,7 @@ define void @sdiv_v6i16(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vslidedown.vi v11, v9, 4
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vdiv.vv v10, v11, v10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v9, v8
@@ -872,7 +872,7 @@ define void @srem_v6i16(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vslidedown.vi v11, v9, 4
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vrem.vv v10, v11, v10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v9, v8
@@ -961,7 +961,7 @@ define void @udiv_v6i16(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vslidedown.vi v11, v9, 4
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vdivu.vv v10, v11, v10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v9, v8
@@ -1050,7 +1050,7 @@ define void @urem_v6i16(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vslidedown.vi v11, v9, 4
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vremu.vv v10, v11, v10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v9, v8
@@ -1198,9 +1198,9 @@ define void @mulhu_v6i16(ptr %x) {
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vadd.vi v9, v9, 12
-; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 4
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vdivu.vv v9, v10, v9
; CHECK-NEXT: lui a1, 45217
; CHECK-NEXT: addi a1, a1, -1785
@@ -1361,9 +1361,9 @@ define void @mulhs_v6i16(ptr %x) {
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: li a1, -14
; CHECK-NEXT: vmadd.vx v10, a1, v9
-; CHECK-NEXT: vsetivli zero, 2, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 4
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vdiv.vv v9, v9, v10
; CHECK-NEXT: lui a1, 1020016
; CHECK-NEXT: addi a1, a1, 2041
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
index 9161cedd58e3c6b..48eb47fae5630b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
@@ -15,7 +15,7 @@ define void @vector_interleave_store_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b, ptr
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 16
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vwaddu.vv v12, v8, v10
; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vwmaccu.vx v12, a2, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index eeb8e517d01d2d8..5faa050603ade5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -17,9 +17,9 @@ define {<3 x i32>, <3 x i32>} @load_factor2_v3(ptr %ptr) {
; CHECK-NEXT: vadd.vv v9, v8, v8
; CHECK-NEXT: vrgather.vv v8, v10, v9
; CHECK-NEXT: vmv.v.i v0, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v12, v10, 4
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vrgather.vi v8, v12, 0, v0.t
; CHECK-NEXT: vadd.vi v11, v9, 1
; CHECK-NEXT: vrgather.vv v9, v10, v11
@@ -178,7 +178,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v16, 16
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 45
@@ -186,7 +186,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 25
@@ -666,7 +666,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vrgather.vi v8, v16, 4
; RV64-NEXT: li a1, 128
; RV64-NEXT: vmv.s.x v4, a1
-; RV64-NEXT: vsetivli zero, 8, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v16, 8
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 19
@@ -674,7 +674,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v4
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a2, a1, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 6ee0e4525f5ec72..814b729b62dd746 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -12355,10 +12355,10 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64V-NEXT: vmv1r.v v12, v10
; RV64V-NEXT: vluxei64.v v12, (a0), v16, v0.t
-; RV64V-NEXT: vsetivli zero, 16, e8, m2, ta, ma
+; RV64V-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64V-NEXT: vslidedown.vi v10, v10, 16
; RV64V-NEXT: vslidedown.vi v8, v8, 16
-; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64V-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64V-NEXT: vsext.vf8 v16, v8
; RV64V-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64V-NEXT: vslidedown.vi v0, v0, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 60b61e889315cfe..521c6ea3fafc3d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -528,7 +528,7 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse8.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB9_4: # %else2
@@ -552,7 +552,7 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB9_9: # %else10
@@ -578,7 +578,7 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -605,7 +605,7 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse8.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -1079,7 +1079,7 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB18_4: # %else2
@@ -1104,7 +1104,7 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB18_9: # %else10
@@ -1132,7 +1132,7 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -1163,7 +1163,7 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -1212,7 +1212,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB19_4: # %else2
@@ -1237,7 +1237,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB19_9: # %else10
@@ -1265,7 +1265,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -1296,7 +1296,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -1346,7 +1346,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB20_4: # %else2
@@ -1372,7 +1372,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB20_9: # %else10
@@ -1402,7 +1402,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -1436,7 +1436,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a1, a1, 255
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -1484,7 +1484,7 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB21_4: # %else2
@@ -1509,7 +1509,7 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB21_9: # %else10
@@ -1537,7 +1537,7 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -1567,7 +1567,7 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -2009,7 +2009,7 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB29_9: # %else10
@@ -2068,7 +2068,7 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -2141,7 +2141,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB30_9: # %else10
@@ -2200,7 +2200,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -2278,7 +2278,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB31_9: # %else10
@@ -2342,7 +2342,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a1, a1, 255
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -2417,7 +2417,7 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB32_9: # %else10
@@ -2476,7 +2476,7 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -2550,7 +2550,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB33_9: # %else10
@@ -2609,7 +2609,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -2688,7 +2688,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: .LBB34_9: # %else10
@@ -2752,7 +2752,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a1, a2, a1
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -2824,7 +2824,7 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB35_9: # %else10
@@ -2881,7 +2881,7 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -6334,7 +6334,7 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB58_4: # %else2
@@ -6359,7 +6359,7 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB58_9: # %else10
@@ -6387,7 +6387,7 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -6418,7 +6418,7 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -6467,7 +6467,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB59_4: # %else2
@@ -6492,7 +6492,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB59_9: # %else10
@@ -6520,7 +6520,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -6551,7 +6551,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -6601,7 +6601,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB60_4: # %else2
@@ -6627,7 +6627,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB60_9: # %else10
@@ -6657,7 +6657,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -6691,7 +6691,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a1, a1, 255
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -6739,7 +6739,7 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse16.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB61_4: # %else2
@@ -6764,7 +6764,7 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB61_9: # %else10
@@ -6792,7 +6792,7 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -6822,7 +6822,7 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F-NEXT: ret
@@ -7122,28 +7122,28 @@ define void @mscatter_v8f32(<8 x float> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV64ZVE32F-NEXT: .LBB67_13: # %cond.store7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a5)
; RV64ZVE32F-NEXT: andi a0, a3, 32
; RV64ZVE32F-NEXT: beqz a0, .LBB67_6
; RV64ZVE32F-NEXT: .LBB67_14: # %cond.store9
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a4)
; RV64ZVE32F-NEXT: andi a0, a3, 64
; RV64ZVE32F-NEXT: beqz a0, .LBB67_7
; RV64ZVE32F-NEXT: .LBB67_15: # %cond.store11
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a0, a3, -128
; RV64ZVE32F-NEXT: beqz a0, .LBB67_8
; RV64ZVE32F-NEXT: .LBB67_16: # %cond.store13
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a1)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> %val, <8 x ptr> %ptrs, i32 4, <8 x i1> %m)
@@ -7214,9 +7214,9 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB68_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -7255,7 +7255,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB68_8
@@ -7266,7 +7266,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB68_11
@@ -7276,9 +7276,9 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i8> %idxs
@@ -7350,9 +7350,9 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB69_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -7391,7 +7391,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB69_8
@@ -7402,7 +7402,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB69_11
@@ -7412,9 +7412,9 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i32>
@@ -7491,9 +7491,9 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB70_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -7535,7 +7535,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB70_8
@@ -7547,7 +7547,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB70_11
@@ -7558,9 +7558,9 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a1, a1, 255
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i32>
@@ -7634,9 +7634,9 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB71_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -7675,7 +7675,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB71_8
@@ -7686,7 +7686,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB71_11
@@ -7696,9 +7696,9 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i16> %idxs
@@ -7771,9 +7771,9 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB72_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -7812,7 +7812,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB72_8
@@ -7823,7 +7823,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB72_11
@@ -7833,9 +7833,9 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i32>
@@ -7913,9 +7913,9 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: .LBB73_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -7957,7 +7957,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a3, a0, a3
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 32
; RV64ZVE32F-NEXT: bnez a3, .LBB73_8
@@ -7969,7 +7969,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a3, a0, a3
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a2, a2, -128
; RV64ZVE32F-NEXT: beqz a2, .LBB73_11
@@ -7980,9 +7980,9 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a1, a2, a1
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i32>
@@ -8053,9 +8053,9 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB74_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
@@ -8092,7 +8092,7 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB74_8
@@ -8103,7 +8103,7 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB74_11
@@ -8113,9 +8113,9 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %idxs
@@ -10682,7 +10682,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse8.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB91_4: # %else2
@@ -10714,7 +10714,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 5
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB91_10: # %else10
@@ -10736,7 +10736,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 9
; RV64ZVE32F-NEXT: vse8.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB91_15: # %else18
@@ -10760,7 +10760,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 13
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB91_20: # %else26
@@ -10783,7 +10783,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 15
; RV64ZVE32F-NEXT: vse8.v v8, (a0)
; RV64ZVE32F-NEXT: .LBB91_24: # %else30
@@ -10801,7 +10801,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 3
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -10820,7 +10820,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 7
; RV64ZVE32F-NEXT: vse8.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 256
@@ -10847,7 +10847,7 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 11
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 51
@@ -10884,10 +10884,10 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
-; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
; RV64-NEXT: vslidedown.vi v10, v10, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
@@ -10914,7 +10914,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_4: # %else2
@@ -10946,7 +10946,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v14, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v14
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 5
; RV64ZVE32F-NEXT: vse8.v v14, (a2)
; RV64ZVE32F-NEXT: .LBB92_10: # %else10
@@ -10968,7 +10968,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 9
; RV64ZVE32F-NEXT: vse8.v v13, (a2)
; RV64ZVE32F-NEXT: .LBB92_15: # %else18
@@ -10992,7 +10992,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 11
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_19: # %else22
@@ -11014,7 +11014,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 13
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB92_23: # %else26
@@ -11036,7 +11036,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 17
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_28: # %else34
@@ -11068,7 +11068,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 21
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_34: # %else42
@@ -11090,7 +11090,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 25
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_39: # %else50
@@ -11114,7 +11114,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 29
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_44: # %else58
@@ -11137,7 +11137,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 31
; RV64ZVE32F-NEXT: vse8.v v8, (a0)
; RV64ZVE32F-NEXT: .LBB92_48: # %else62
@@ -11155,7 +11155,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -11174,7 +11174,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 7
; RV64ZVE32F-NEXT: vse8.v v13, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 256
@@ -11201,7 +11201,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 15
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 47
@@ -11228,7 +11228,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 19
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 43
@@ -11247,7 +11247,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 23
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 39
@@ -11274,7 +11274,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 27
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 35
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 4766b3727a46252..a2a8bd312ee80ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1097,7 +1097,7 @@ define double @vreduce_fwadd_v32f64(ptr %x, double %s) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwadd.vv v24, v8, v16
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index f2a1f2752cda000..11004bdcb195707 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -1431,7 +1431,7 @@ define i64 @vwreduce_add_v32i64(ptr %x) {
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v8, 16
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vwadd.vv v24, v8, v16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vmv.s.x v8, zero
@@ -1449,7 +1449,7 @@ define i64 @vwreduce_add_v32i64(ptr %x) {
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v16, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vwadd.vv v24, v8, v16
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vmv.s.x v8, zero
@@ -1470,7 +1470,7 @@ define i64 @vwreduce_uadd_v32i64(ptr %x) {
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v8, 16
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vwaddu.vv v24, v8, v16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vmv.s.x v8, zero
@@ -1488,7 +1488,7 @@ define i64 @vwreduce_uadd_v32i64(ptr %x) {
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v16, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vwaddu.vv v24, v8, v16
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vmv.s.x v8, zero
@@ -1567,7 +1567,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v8, 16
; RV32-NEXT: vslidedown.vi v0, v16, 16
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vmv4r.v v8, v0
; RV32-NEXT: vwadd.vv v0, v24, v8
; RV32-NEXT: csrr a0, vlenb
@@ -1615,7 +1615,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v8, 16
; RV64-NEXT: vslidedown.vi v0, v16, 16
-; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vmv4r.v v8, v0
; RV64-NEXT: vwadd.vv v0, v24, v8
; RV64-NEXT: csrr a0, vlenb
@@ -1666,7 +1666,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v8, 16
; RV32-NEXT: vslidedown.vi v0, v16, 16
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vmv4r.v v8, v0
; RV32-NEXT: vwaddu.vv v0, v24, v8
; RV32-NEXT: csrr a0, vlenb
@@ -1714,7 +1714,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v8, 16
; RV64-NEXT: vslidedown.vi v0, v16, 16
-; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vmv4r.v v8, v0
; RV64-NEXT: vwaddu.vv v0, v24, v8
; RV64-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index f52ba6f51d5c897..d969e52e3075a9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -80,7 +80,7 @@ define void @widen_4xv4i16_unaligned(ptr %x, ptr %z) {
; CHECK-NO-MISALIGN-NEXT: vle8.v v12, (a2)
; CHECK-NO-MISALIGN-NEXT: addi a0, a0, 24
; CHECK-NO-MISALIGN-NEXT: vle8.v v14, (a0)
-; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 8, e16, m2, tu, ma
+; CHECK-NO-MISALIGN-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; CHECK-NO-MISALIGN-NEXT: vslideup.vi v8, v10, 4
; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 12, e16, m2, tu, ma
; CHECK-NO-MISALIGN-NEXT: vslideup.vi v8, v12, 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 1cc09b7f5eeb5f4..b064f5439bb0564 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -199,7 +199,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV32-SLOW-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV32-SLOW-NEXT: vslide1down.vx v8, v8, a1
; RV32-SLOW-NEXT: vslide1down.vx v8, v8, a0
-; RV32-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-SLOW-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV32-SLOW-NEXT: vmv1r.v v8, v9
; RV32-SLOW-NEXT: ret
@@ -354,7 +354,7 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m)
; RV64-SLOW-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64-SLOW-NEXT: vslidedown.vi v9, v8, 1
; RV64-SLOW-NEXT: vmv.x.s a1, v9
-; RV64-SLOW-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-SLOW-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV64-SLOW-NEXT: vslidedown.vi v9, v10, 1
; RV64-SLOW-NEXT: vmv.x.s a2, v9
; RV64-SLOW-NEXT: srli a3, a1, 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
index c9dc75e18774f8a..2bb6d9249f402e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -215,7 +215,7 @@ define <32 x double> @vfwadd_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vfwadd.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
@@ -397,7 +397,7 @@ define <32 x double> @vfwadd_vf_v32f32(ptr %x, float %y) {
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v24, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vfwcvt.f.f.v v8, v16
; CHECK-NEXT: vfwadd.wv v16, v8, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
index 8ad858d4c76598e..fd2d4b23e4ef4c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -215,7 +215,7 @@ define <32 x double> @vfwmul_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vfwmul.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
@@ -397,9 +397,9 @@ define <32 x double> @vfwmul_vf_v32f32(ptr %x, float %y) {
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v24, v16
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vfwcvt.f.f.v v0, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
index d22781d6a97ac21..b1e53eeefa0d33b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -215,7 +215,7 @@ define <32 x double> @vfwsub_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vfwsub.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
@@ -397,9 +397,9 @@ define <32 x double> @vfwsub_vf_v32f32(ptr %x, float %y) {
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v8, v16
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v16, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v24, v16
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vfwcvt.f.f.v v0, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index beff4157b14bbaf..1b620efa4a9e154 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -301,7 +301,7 @@ define <32 x i8> @vpgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %
; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
@@ -1961,7 +1961,7 @@ define <32 x double> @vpgather_baseidx_v32i8_v32f64(ptr %base, <32 x i8> %idxs,
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: vsext.vf8 v24, v8
@@ -2019,9 +2019,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
-; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: li a3, 16
@@ -2137,7 +2137,7 @@ define <32 x double> @vpgather_baseidx_v32i16_v32f64(ptr %base, <32 x i16> %idxs
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: vsext.vf4 v24, v8
@@ -2195,9 +2195,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(ptr %base, <32 x i16>
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v24, v8
-; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: li a3, 16
@@ -2312,7 +2312,7 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v16, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsext.vf2 v24, v8
@@ -2369,9 +2369,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: li a3, 16
@@ -2428,9 +2428,9 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v24, v8
-; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: li a3, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index 83e3422c44b95d1..1b3e84bfb0cee89 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -1797,7 +1797,7 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v0, v24, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v0
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: vsext.vf2 v0, v24
@@ -1882,9 +1882,9 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v0, v24
-; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v24, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v24
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: li a3, 16
@@ -1969,9 +1969,9 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v0, v24
-; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v24, v24, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v24
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: li a3, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index c8de041a26f4300..5dbbf1fc06c39fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -353,7 +353,7 @@ define <32 x i64> @vwadd_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vwadd.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index e51ca9f153dcb19..baf28d885fadf77 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -353,7 +353,7 @@ define <32 x i64> @vwaddu_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vwaddu.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index 1e36d8d45ec16d9..2d2a4d402c4dcfb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -383,7 +383,7 @@ define <32 x i64> @vwmul_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vwmul.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index 921037db2ea99ed..88154a508f13dfd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -375,7 +375,7 @@ define <32 x i64> @vwmulsu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vwmulsu.vv v8, v24, v16
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index 2453e5423e13f9f..5a636b49f63e4df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -359,7 +359,7 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vwmulu.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
index 858ae573eb4063e..62277b6f061d4ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -353,7 +353,7 @@ define <32 x i64> @vwsub_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vwsub.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index cf00fe14858d918..53d37c503f3e4b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -353,7 +353,7 @@ define <32 x i64> @vwsubu_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv4r.v v24, v8
; CHECK-NEXT: vwsubu.vv v8, v16, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index b8f2afd194e46d6..9cd5a69a253d3b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -17,7 +17,7 @@ define <32 x i1> @vector_interleave_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b) {
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 16
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vwaddu.vv v12, v8, v10
; CHECK-NEXT: li a1, -1
; CHECK-NEXT: vwmaccu.vx v12, a1, v10
@@ -35,7 +35,7 @@ define <32 x i1> @vector_interleave_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b) {
; ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
; ZVBB-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; ZVBB-NEXT: vslidedown.vi v10, v8, 16
-; ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVBB-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; ZVBB-NEXT: vwsll.vi v12, v10, 8
; ZVBB-NEXT: vwaddu.wv v12, v12, v8
; ZVBB-NEXT: vsetvli zero, a0, e8, m2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll
index 7bae1160a8a5fc4..f4d9355034613e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-valid-elen-fp.ll
@@ -9,7 +9,7 @@ define void @foo(half %y, ptr %i64p) {
; CHECK-NO-FELEN64: # %bb.0: # %entry
; CHECK-NO-FELEN64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NO-FELEN64-NEXT: vle64.v v8, (a0)
-; CHECK-NO-FELEN64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NO-FELEN64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NO-FELEN64-NEXT: vfmv.s.f v9, fa0
; CHECK-NO-FELEN64-NEXT: #APP
; CHECK-NO-FELEN64-NEXT: # use v8 v9
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index d311311175c15e6..6be0facd48e685f 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -684,7 +684,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: vmerge.vim v8, v8, -1, v0
; RV32MV-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32MV-NEXT: vse32.v v8, (s0)
-; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32MV-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32MV-NEXT: vslidedown.vi v10, v8, 4
; RV32MV-NEXT: vmv.x.s a0, v10
; RV32MV-NEXT: srli a1, a0, 30
@@ -694,7 +694,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: or a1, a2, a1
; RV32MV-NEXT: andi a1, a1, 7
; RV32MV-NEXT: sb a1, 12(s0)
-; RV32MV-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32MV-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32MV-NEXT: vslidedown.vi v9, v8, 1
; RV32MV-NEXT: vmv.x.s a1, v9
; RV32MV-NEXT: vslidedown.vi v9, v8, 2
@@ -787,7 +787,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: sb a3, 12(a0)
; RV64MV-NEXT: vmv.x.s a3, v8
; RV64MV-NEXT: and a1, a3, a1
-; RV64MV-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64MV-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV64MV-NEXT: vslidedown.vi v8, v8, 1
; RV64MV-NEXT: vmv.x.s a3, v8
; RV64MV-NEXT: slli a4, a3, 33