[llvm] [RISCV] enable VTYPE before whole RVVReg move (PR #117866)
Piyou Chen via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 1 22:34:16 PST 2024
https://github.com/BeMg updated https://github.com/llvm/llvm-project/pull/117866
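For context, the first patch in the series makes RISCVInsertVSETVLI emit a dummy VSETVLI in front of whole-register RVV copies (COPYs within VR, VRM2/VRM4/VRM8, and the VRN*M* tuple classes) wherever VTYPE may not be valid at that point: at the top of a block before any vector-config or SEW-carrying instruction has been seen, and again after a call or inline asm, which may clobber VTYPE. Below is a minimal before/after sketch of the resulting codegen, taken from the inline-asm-v-constraint.ll update in the diff; the specific VTYPE written (e32, m1, tu, mu) is just the dummy encoding the pass builds via PseudoVSETVLIX0, and presumably the only requirement is that a valid VTYPE (vill clear) is in effect before the whole-register move executes.

    # before this patch
    vmv1r.v v9, v0

    # after this patch
    vsetvli zero, zero, e32, m1, tu, mu    # establish a valid (dummy) VTYPE first
    vmv1r.v v9, v0
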
From b0d5b06529acd451222fc01c0520eace9994d1ae Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Wed, 27 Nov 2024 00:58:22 -0800
Subject: [PATCH 1/7] [RISCV] Add VSETVLI to whole RVVReg Copy
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 56 +-
.../CodeGen/RISCV/inline-asm-v-constraint.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/abs-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll | 4 +
.../CodeGen/RISCV/rvv/calling-conv-fastcc.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/calling-conv.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll | 20 +
llvm/test/CodeGen/RISCV/rvv/compressstore.ll | 2 +
.../RISCV/rvv/constant-folding-crash.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/copyprop.mir | 1 +
llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll | 8 +
llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll | 10 +
llvm/test/CodeGen/RISCV/rvv/expandload.ll | 514 ++++
.../CodeGen/RISCV/rvv/extract-subvector.ll | 19 +
.../rvv/fixed-vector-i8-index-cornercase.ll | 2 +
.../RISCV/rvv/fixed-vectors-bitreverse-vp.ll | 2 +
.../rvv/fixed-vectors-calling-conv-fastcc.ll | 1 +
.../RISCV/rvv/fixed-vectors-calling-conv.ll | 1 +
.../RISCV/rvv/fixed-vectors-ceil-vp.ll | 11 +
.../RISCV/rvv/fixed-vectors-ctpop-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-floor-vp.ll | 11 +
.../RISCV/rvv/fixed-vectors-fmaximum-vp.ll | 18 +
.../RISCV/rvv/fixed-vectors-fminimum-vp.ll | 18 +
.../RISCV/rvv/fixed-vectors-fp-interleave.ll | 2 +
.../RISCV/rvv/fixed-vectors-fptrunc-vp.ll | 1 +
.../RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll | 1 +
.../rvv/fixed-vectors-insert-subvector.ll | 2 +
.../RISCV/rvv/fixed-vectors-int-interleave.ll | 2 +
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 2057 ++++++++---------
.../rvv/fixed-vectors-masked-load-int.ll | 1 +
.../RISCV/rvv/fixed-vectors-nearbyint-vp.ll | 9 +
.../rvv/fixed-vectors-reduction-mask-vp.ll | 31 +
.../RISCV/rvv/fixed-vectors-rint-vp.ll | 9 +
.../RISCV/rvv/fixed-vectors-round-vp.ll | 11 +
.../RISCV/rvv/fixed-vectors-roundeven-vp.ll | 11 +
.../RISCV/rvv/fixed-vectors-roundtozero-vp.ll | 11 +
.../RISCV/rvv/fixed-vectors-setcc-int-vp.ll | 6 +
.../RISCV/rvv/fixed-vectors-shuffle-concat.ll | 9 +
.../rvv/fixed-vectors-shuffle-exact-vlen.ll | 2 +
.../rvv/fixed-vectors-shuffle-reverse.ll | 11 +
.../rvv/fixed-vectors-shuffle-vslide1up.ll | 1 +
.../fixed-vectors-strided-load-store-asm.ll | 1 +
.../RISCV/rvv/fixed-vectors-strided-vpload.ll | 3 +
.../RISCV/rvv/fixed-vectors-trunc-vp.ll | 9 +
.../RISCV/rvv/fixed-vectors-unaligned.ll | 68 +-
.../RISCV/rvv/fixed-vectors-vadd-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vmax-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vmaxu-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vmin-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vminu-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vpgather.ll | 2 +
.../CodeGen/RISCV/rvv/fixed-vectors-vpload.ll | 2 +
.../RISCV/rvv/fixed-vectors-vpmerge.ll | 1 +
.../RISCV/rvv/fixed-vectors-vsadd-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vsaddu-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vselect-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vssub-vp.ll | 2 +
.../RISCV/rvv/fixed-vectors-vssubu-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/floor-vp.ll | 20 +
.../test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll | 26 +
.../test/CodeGen/RISCV/rvv/fminimum-sdnode.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll | 26 +
.../RISCV/rvv/fold-scalar-load-crash.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll | 7 +
llvm/test/CodeGen/RISCV/rvv/inline-asm.ll | 7 +
.../CodeGen/RISCV/rvv/insert-subvector.ll | 22 +
llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/masked-tama.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll | 16 +
.../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll | 1 +
.../RISCV/rvv/named-vector-shuffle-reverse.ll | 13 +
llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll | 20 +
llvm/test/CodeGen/RISCV/rvv/pr88576.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/rint-vp.ll | 20 +
llvm/test/CodeGen/RISCV/rvv/round-vp.ll | 20 +
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll | 20 +
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll | 20 +
.../RISCV/rvv/rv32-spill-vector-csr.ll | 1 +
.../RISCV/rvv/rv64-spill-vector-csr.ll | 1 +
.../test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll | 1 +
.../RISCV/rvv/rvv-peephole-vmerge-vops.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 8 +
llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll | 12 +
.../CodeGen/RISCV/rvv/sink-splat-operands.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll | 4 +
.../test/CodeGen/RISCV/rvv/strided-vpstore.ll | 2 +
.../RISCV/rvv/undef-earlyclobber-chain.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vcpop.ll | 7 +
.../RISCV/rvv/vector-deinterleave-fixed.ll | 1 +
.../CodeGen/RISCV/rvv/vector-deinterleave.ll | 8 +
.../RISCV/rvv/vector-interleave-fixed.ll | 4 +
.../RISCV/rvv/vector-interleave-store.ll | 1 +
.../CodeGen/RISCV/rvv/vector-interleave.ll | 15 +
.../RISCV/rvv/vector-reassociations.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vector-splice.ll | 12 +
llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vfirst.ll | 7 +
llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll | 16 +
.../RISCV/rvv/vfmadd-constrained-sdnode.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll | 3 +
llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll | 2 +
.../RISCV/rvv/vfnmadd-constrained-sdnode.ll | 1 +
.../RISCV/rvv/vfnmsub-constrained-sdnode.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vl-opt.ll | 2 +
.../CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll | 165 ++
.../CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll | 165 ++
llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vmfeq.ll | 12 +
llvm/test/CodeGen/RISCV/rvv/vmfge.ll | 12 +
llvm/test/CodeGen/RISCV/rvv/vmfgt.ll | 12 +
llvm/test/CodeGen/RISCV/rvv/vmfle.ll | 12 +
llvm/test/CodeGen/RISCV/rvv/vmflt.ll | 12 +
llvm/test/CodeGen/RISCV/rvv/vmfne.ll | 12 +
llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vmsbf.ll | 7 +
llvm/test/CodeGen/RISCV/rvv/vmseq.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsge.ll | 37 +
llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsgt.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsif.ll | 7 +
llvm/test/CodeGen/RISCV/rvv/vmsle.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsleu.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmslt.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsltu.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsne.ll | 36 +
llvm/test/CodeGen/RISCV/rvv/vmsof.ll | 7 +
.../CodeGen/RISCV/rvv/vmv.v.v-peephole.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vp-select.ll | 1 +
.../RISCV/rvv/vp-splice-mask-fixed-vectors.ll | 12 +
.../RISCV/rvv/vp-splice-mask-vectors.ll | 21 +
.../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll | 18 +
llvm/test/CodeGen/RISCV/rvv/vpload.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vpstore.ll | 2 +
.../CodeGen/RISCV/rvv/vreductions-mask-vp.ll | 60 +-
.../RISCV/rvv/vrgatherei16-subreg-liveness.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll | 4 +
llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vselect-int.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll | 11 +
.../CodeGen/RISCV/rvv/vsetvli-insert-O0.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll | 1 +
llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll | 2 +
llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll | 10 +
llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll | 6 +
llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll | 2 +
174 files changed, 3342 insertions(+), 1143 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 052b4a61298223..64bb732b575ab7 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -922,6 +922,7 @@ class RISCVInsertVSETVLI : public MachineFunctionPass {
VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) const;
VSETVLIInfo computeInfoForInstr(const MachineInstr &MI) const;
void forwardVSETVLIAVL(VSETVLIInfo &Info) const;
+ void enableVTYPEBeforeMove(MachineBasicBlock &MBB);
};
} // end anonymous namespace
@@ -1768,6 +1769,52 @@ void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
}
}
+static bool isRVVCopy(const MachineInstr &MI) {
+ static const TargetRegisterClass *RVVRegClasses[] = {
+ &RISCV::VRRegClass, &RISCV::VRM2RegClass, &RISCV::VRM4RegClass,
+ &RISCV::VRM8RegClass, &RISCV::VRN2M1RegClass, &RISCV::VRN2M2RegClass,
+ &RISCV::VRN2M4RegClass, &RISCV::VRN3M1RegClass, &RISCV::VRN3M2RegClass,
+ &RISCV::VRN4M1RegClass, &RISCV::VRN4M2RegClass, &RISCV::VRN5M1RegClass,
+ &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass, &RISCV::VRN8M1RegClass};
+
+ if (MI.getOpcode() != TargetOpcode::COPY)
+ return false;
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
+ for (const auto &RegClass : RVVRegClasses) {
+ if (RegClass->contains(DstReg, SrcReg)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void RISCVInsertVSETVLI::enableVTYPEBeforeMove(MachineBasicBlock &MBB) {
+ bool NeedVSETVL = true;
+
+ for (auto &MI : MBB) {
+ if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags))
+ NeedVSETVL = false;
+
+ if (MI.isCall() || MI.isInlineAsm())
+ NeedVSETVL = true;
+
+ if (NeedVSETVL && isRVVCopy(MI)) {
+ auto VSETVL0MI =
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(RISCV::PseudoVSETVLIX0))
+ .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+ .addReg(RISCV::X0, RegState::Kill)
+ .addImm(RISCVVType::encodeVTYPE(RISCVII::VLMUL::LMUL_1, 32, false,
+ false))
+ .addReg(RISCV::VL, RegState::Implicit);
+ if (LIS)
+ LIS->InsertMachineInstrInMaps(*VSETVL0MI);
+ NeedVSETVL = false;
+ }
+ }
+}
+
bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
// Skip if the vector extension is not enabled.
ST = &MF.getSubtarget<RISCVSubtarget>();
@@ -1798,12 +1845,6 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
}
- // If we didn't find any instructions that need VSETVLI, we're done.
- if (!HaveVectorOp) {
- BlockInfo.clear();
- return false;
- }
-
// Phase 2 - determine the exit VL/VTYPE from each block. We add all
// blocks to the list here, but will also add any that need to be revisited
// during Phase 2 processing.
@@ -1842,6 +1883,9 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF)
insertReadVL(MBB);
+ for (MachineBasicBlock &MBB : MF)
+ enableVTYPEBeforeMove(MBB);
+
BlockInfo.clear();
return HaveVectorOp;
}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
index c04e4fea7b2c29..6b566e2df0d798 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
@@ -45,6 +45,7 @@ define <vscale x 1 x i8> @constraint_vd(<vscale x 1 x i8> %0, <vscale x 1 x i8>
define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1) nounwind {
; RV32I-LABEL: constraint_vm:
; RV32I: # %bb.0:
+; RV32I-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32I-NEXT: vmv1r.v v9, v0
; RV32I-NEXT: vmv1r.v v0, v8
; RV32I-NEXT: #APP
@@ -54,6 +55,7 @@ define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1>
;
; RV64I-LABEL: constraint_vm:
; RV64I: # %bb.0:
+; RV64I-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64I-NEXT: vmv1r.v v9, v0
; RV64I-NEXT: vmv1r.v v0, v8
; RV64I-NEXT: #APP
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 163d9145bc3623..c5ff5deabb6e75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -567,6 +567,7 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -590,6 +591,7 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 66a1178cddb66c..a41b63babd8a08 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -3075,6 +3075,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -3121,6 +3122,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a3
; CHECK-NEXT: .LBB46_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a3, a3, 3
@@ -3158,6 +3160,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16:
; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 1
@@ -3174,6 +3177,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 1c95ec8fafd4f1..12d98b05dbb44e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -1584,6 +1584,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1609,6 +1610,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1631,6 +1633,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
;
; CHECK-ZVKB-LABEL: vp_bswap_nxv64i16:
; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVKB-NEXT: vmv1r.v v24, v0
; CHECK-ZVKB-NEXT: csrr a1, vlenb
; CHECK-ZVKB-NEXT: srli a2, a1, 1
@@ -1647,6 +1650,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-ZVKB-NEXT: # %bb.1:
; CHECK-ZVKB-NEXT: mv a0, a1
; CHECK-ZVKB-NEXT: .LBB32_2:
+; CHECK-ZVKB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVKB-NEXT: vmv1r.v v0, v24
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vrev8.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index a4e5ab661c5285..e85a7af56cc497 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -336,6 +336,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: li a3, 2
; RV32-NEXT: vs8r.v v16, (a1)
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v8, v0
; RV32-NEXT: vmv8r.v v16, v24
; RV32-NEXT: call ext2
@@ -374,6 +375,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV64-NEXT: add a1, a3, a1
; RV64-NEXT: li a3, 2
; RV64-NEXT: vs8r.v v16, (a1)
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v8, v0
; RV64-NEXT: vmv8r.v v16, v24
; RV64-NEXT: call ext2
@@ -451,6 +453,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 128
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v16, v0
; RV32-NEXT: call ext3
; RV32-NEXT: addi sp, s0, -144
@@ -523,6 +526,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 128
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v16, v0
; RV64-NEXT: call ext3
; RV64-NEXT: addi sp, s0, -144
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 9b27116fef7cae..05873a4e83aa29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -103,6 +103,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call callee_tuple_return
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
@@ -119,6 +120,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call callee_tuple_return
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
@@ -144,6 +146,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
@@ -160,6 +163,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index 7d0b0118a72725..d9644b70531f39 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -117,6 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.ceil.nxv4bf16(<vscale x 4 x bfloat>, <vsc
define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -169,6 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.ceil.nxv8bf16(<vscale x 8 x bfloat>, <vsc
define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -221,6 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.ceil.nxv16bf16(<vscale x 16 x bfloat>, <
define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -279,6 +282,7 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va,
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -582,6 +586,7 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -649,6 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -668,6 +674,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -735,6 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -754,6 +762,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -821,6 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv32f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -846,6 +856,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1068,6 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.ceil.nxv4f32(<vscale x 4 x float>, <vscale
define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1112,6 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.ceil.nxv8f32(<vscale x 8 x float>, <vscale
define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1156,6 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.ceil.nxv16f32(<vscale x 16 x float>, <vsc
define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1242,6 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1286,6 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1330,6 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1374,6 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1425,6 +1443,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
@@ -1458,6 +1477,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index bfb2d0a3accc44..d1679b6e2d7fdf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -197,6 +197,7 @@ entry:
define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
; RV64-LABEL: test_compresstore_v256i8:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v7, v8
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -230,6 +231,7 @@ define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: sub sp, sp, a2
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v16
; RV32-NEXT: li a2, 128
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
index ad176df71397e6..279eccce002ea8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -18,6 +18,7 @@
define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lanes.b, <4 x i1> %sel) {
; RV32-LABEL: constant_folding_crash:
; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: lw a0, 8(a0)
; RV32-NEXT: andi a0, a0, 1
@@ -43,6 +44,7 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
;
; RV64-LABEL: constant_folding_crash:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: ld a0, 8(a0)
; RV64-NEXT: andi a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
index a9da6c305aac3c..a4cfbe208581b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
@@ -22,6 +22,7 @@
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: bgeu a0, a2, .LBB0_2
; CHECK-NEXT: .LBB0_4: # %entry
+ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vse64.v v8, (a1)
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index f56a792fdef6a8..9f2712b9506426 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -1235,6 +1235,7 @@ declare <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64>, i1 immar
define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctlz_nxv16i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: fsrmi a4, 1
@@ -1259,6 +1260,7 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: fsrmi a1, 1
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
@@ -1270,6 +1272,7 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_ctlz_nxv16i64:
; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -1285,6 +1288,7 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
@@ -2465,6 +2469,7 @@ define <vscale x 8 x i64> @vp_ctlz_zero_undef_nxv8i64_unmasked(<vscale x 8 x i64
define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: fsrmi a3, 1
@@ -2487,6 +2492,7 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
; CHECK-NEXT: fsrmi a1, 1
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
@@ -2497,6 +2503,7 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
;
; CHECK-ZVBB-LABEL: vp_ctlz_zero_undef_nxv16i64:
; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -2512,6 +2519,7 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 9e75dc9dccffde..27c7120a35b9b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2022,6 +2022,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 24
@@ -2126,6 +2127,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
@@ -2294,6 +2296,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i64:
; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -2309,6 +2312,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 9e6295b6644171..7be34275ce27c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2246,6 +2246,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 5
@@ -2390,6 +2391,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
@@ -2499,6 +2501,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
@@ -2553,6 +2556,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB46_2:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a1, sp, a1
@@ -2586,6 +2590,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_cttz_nxv16i64:
; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -2601,6 +2606,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
@@ -4002,6 +4008,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -4032,6 +4039,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
@@ -4057,6 +4065,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
;
; CHECK-ZVBB-LABEL: vp_cttz_zero_undef_nxv16i64:
; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -4072,6 +4081,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
+; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index b32d85bb1943a5..07159c0c0cd8f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -227,6 +227,7 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8
; CHECK-RV32-NEXT: add a2, sp, a2
; CHECK-RV32-NEXT: addi a2, a2, 16
; CHECK-RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v7, v8
; CHECK-RV32-NEXT: li a2, 128
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -338,6 +339,7 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8
; CHECK-RV64-NEXT: add a2, sp, a2
; CHECK-RV64-NEXT: addi a2, a2, 16
; CHECK-RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v7, v8
; CHECK-RV64-NEXT: li a2, 128
; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -1626,6 +1628,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a1, .LBB61_30
; CHECK-RV32-NEXT: .LBB61_29: # %cond.load109
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 29, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -1639,6 +1642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_32
; CHECK-RV32-NEXT: # %bb.31: # %cond.load113
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 30, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a2
@@ -1653,6 +1657,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_34
; CHECK-RV32-NEXT: # %bb.33: # %cond.load117
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a2
; CHECK-RV32-NEXT: vsetivli zero, 31, e8, m1, tu, ma
@@ -1787,6 +1792,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_65: # %cond.load241
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -1804,6 +1810,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_68
; CHECK-RV32-NEXT: # %bb.67: # %cond.load245
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a3
; CHECK-RV32-NEXT: li a3, 63
@@ -1940,6 +1947,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_99: # %cond.load369
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -1957,6 +1965,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_102
; CHECK-RV32-NEXT: # %bb.101: # %cond.load373
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a2
; CHECK-RV32-NEXT: li a2, 95
@@ -2093,6 +2102,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_133: # %cond.load497
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -2110,6 +2120,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_136
; CHECK-RV32-NEXT: # %bb.135: # %cond.load501
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a3
; CHECK-RV32-NEXT: li a3, 127
@@ -2246,6 +2257,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_167: # %cond.load625
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -2263,6 +2275,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_170
; CHECK-RV32-NEXT: # %bb.169: # %cond.load629
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 159
@@ -2399,6 +2412,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_201: # %cond.load753
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -2416,6 +2430,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_204
; CHECK-RV32-NEXT: # %bb.203: # %cond.load757
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 191
@@ -2552,6 +2567,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_235: # %cond.load881
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -2569,6 +2585,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_238
; CHECK-RV32-NEXT: # %bb.237: # %cond.load885
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 223
@@ -2705,6 +2722,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_269: # %cond.load1009
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -2722,6 +2740,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_272
; CHECK-RV32-NEXT: # %bb.271: # %cond.load1013
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 255
@@ -3896,6 +3915,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: ret
; CHECK-RV32-NEXT: .LBB61_544: # %cond.load
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v8, a1
@@ -3907,6 +3927,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_2
; CHECK-RV32-NEXT: .LBB61_545: # %cond.load1
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3920,6 +3941,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_3
; CHECK-RV32-NEXT: .LBB61_546: # %cond.load5
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3932,6 +3954,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_4
; CHECK-RV32-NEXT: .LBB61_547: # %cond.load9
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3944,6 +3967,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_5
; CHECK-RV32-NEXT: .LBB61_548: # %cond.load13
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 5, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3956,6 +3980,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_6
; CHECK-RV32-NEXT: .LBB61_549: # %cond.load17
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3968,6 +3993,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_7
; CHECK-RV32-NEXT: .LBB61_550: # %cond.load21
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3980,6 +4006,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_8
; CHECK-RV32-NEXT: .LBB61_551: # %cond.load25
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3992,6 +4019,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_9
; CHECK-RV32-NEXT: .LBB61_552: # %cond.load29
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 9, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4004,6 +4032,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_10
; CHECK-RV32-NEXT: .LBB61_553: # %cond.load33
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4016,6 +4045,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_11
; CHECK-RV32-NEXT: .LBB61_554: # %cond.load37
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4028,6 +4058,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_12
; CHECK-RV32-NEXT: .LBB61_555: # %cond.load41
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4040,6 +4071,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_13
; CHECK-RV32-NEXT: .LBB61_556: # %cond.load45
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 13, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4052,6 +4084,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_14
; CHECK-RV32-NEXT: .LBB61_557: # %cond.load49
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4064,6 +4097,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_15
; CHECK-RV32-NEXT: .LBB61_558: # %cond.load53
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4076,6 +4110,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_16
; CHECK-RV32-NEXT: .LBB61_559: # %cond.load57
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4088,6 +4123,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_17
; CHECK-RV32-NEXT: .LBB61_560: # %cond.load61
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 17, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4100,6 +4136,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_18
; CHECK-RV32-NEXT: .LBB61_561: # %cond.load65
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 18, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4112,6 +4149,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_19
; CHECK-RV32-NEXT: .LBB61_562: # %cond.load69
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 19, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4124,6 +4162,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_20
; CHECK-RV32-NEXT: .LBB61_563: # %cond.load73
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 20, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4136,6 +4175,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_21
; CHECK-RV32-NEXT: .LBB61_564: # %cond.load77
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 21, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4148,6 +4188,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_22
; CHECK-RV32-NEXT: .LBB61_565: # %cond.load81
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 22, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4160,6 +4201,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_23
; CHECK-RV32-NEXT: .LBB61_566: # %cond.load85
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 23, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4172,6 +4214,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_24
; CHECK-RV32-NEXT: .LBB61_567: # %cond.load89
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 24, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4184,6 +4227,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_25
; CHECK-RV32-NEXT: .LBB61_568: # %cond.load93
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 25, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4196,6 +4240,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_26
; CHECK-RV32-NEXT: .LBB61_569: # %cond.load97
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 26, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4208,6 +4253,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_27
; CHECK-RV32-NEXT: .LBB61_570: # %cond.load101
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 27, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4220,6 +4266,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_28
; CHECK-RV32-NEXT: .LBB61_571: # %cond.load105
; CHECK-RV32-NEXT: lbu a1, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 28, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4234,6 +4281,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_30
; CHECK-RV32-NEXT: .LBB61_572: # %cond.load121
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a3
; CHECK-RV32-NEXT: li a3, 32
@@ -4248,6 +4296,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_573: # %cond.load125
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4264,6 +4313,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_574: # %cond.load129
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4280,6 +4330,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_575: # %cond.load133
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4296,6 +4347,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_576: # %cond.load137
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4312,6 +4364,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_577: # %cond.load141
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4328,6 +4381,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_578: # %cond.load145
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4344,6 +4398,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_579: # %cond.load149
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4360,6 +4415,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_580: # %cond.load153
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4376,6 +4432,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_581: # %cond.load157
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4392,6 +4449,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_582: # %cond.load161
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4408,6 +4466,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_583: # %cond.load165
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4424,6 +4483,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_584: # %cond.load169
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4440,6 +4500,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_585: # %cond.load173
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4456,6 +4517,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_586: # %cond.load177
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4472,6 +4534,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_587: # %cond.load181
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4488,6 +4551,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_588: # %cond.load185
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4504,6 +4568,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_589: # %cond.load189
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4520,6 +4585,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_590: # %cond.load193
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4536,6 +4602,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_591: # %cond.load197
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4552,6 +4619,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_592: # %cond.load201
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4568,6 +4636,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_593: # %cond.load205
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4584,6 +4653,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_594: # %cond.load209
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4600,6 +4670,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_595: # %cond.load213
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4616,6 +4687,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_596: # %cond.load217
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4632,6 +4704,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_597: # %cond.load221
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4648,6 +4721,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_598: # %cond.load225
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4664,6 +4738,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_599: # %cond.load229
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4680,6 +4755,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_600: # %cond.load233
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4696,6 +4772,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_601: # %cond.load237
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4713,6 +4790,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_66
; CHECK-RV32-NEXT: .LBB61_602: # %cond.load249
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a2
; CHECK-RV32-NEXT: li a2, 64
@@ -4728,6 +4806,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_603: # %cond.load253
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4744,6 +4823,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_604: # %cond.load257
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4760,6 +4840,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_605: # %cond.load261
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4776,6 +4857,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_606: # %cond.load265
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4792,6 +4874,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_607: # %cond.load269
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4808,6 +4891,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_608: # %cond.load273
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4824,6 +4908,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_609: # %cond.load277
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4840,6 +4925,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_610: # %cond.load281
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4856,6 +4942,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_611: # %cond.load285
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4872,6 +4959,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_612: # %cond.load289
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4888,6 +4976,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_613: # %cond.load293
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4904,6 +4993,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_614: # %cond.load297
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4920,6 +5010,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_615: # %cond.load301
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4936,6 +5027,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_616: # %cond.load305
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4952,6 +5044,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_617: # %cond.load309
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4968,6 +5061,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_618: # %cond.load313
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4984,6 +5078,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_619: # %cond.load317
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5000,6 +5095,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_620: # %cond.load321
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5016,6 +5112,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_621: # %cond.load325
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5032,6 +5129,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_622: # %cond.load329
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5048,6 +5146,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_623: # %cond.load333
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5064,6 +5163,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_624: # %cond.load337
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5080,6 +5180,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_625: # %cond.load341
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5096,6 +5197,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_626: # %cond.load345
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5112,6 +5214,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_627: # %cond.load349
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5128,6 +5231,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_628: # %cond.load353
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5144,6 +5248,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_629: # %cond.load357
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5160,6 +5265,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_630: # %cond.load361
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5176,6 +5282,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_631: # %cond.load365
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5193,6 +5300,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_100
; CHECK-RV32-NEXT: .LBB61_632: # %cond.load377
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a3
; CHECK-RV32-NEXT: li a3, 96
@@ -5208,6 +5316,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_633: # %cond.load381
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5224,6 +5333,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_634: # %cond.load385
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5240,6 +5350,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_635: # %cond.load389
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5256,6 +5367,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_636: # %cond.load393
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5272,6 +5384,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_637: # %cond.load397
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5288,6 +5401,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_638: # %cond.load401
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5304,6 +5418,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_639: # %cond.load405
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5320,6 +5435,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_640: # %cond.load409
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5336,6 +5452,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_641: # %cond.load413
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5352,6 +5469,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_642: # %cond.load417
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5368,6 +5486,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_643: # %cond.load421
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5384,6 +5503,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_644: # %cond.load425
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5400,6 +5520,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_645: # %cond.load429
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5416,6 +5537,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_646: # %cond.load433
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5432,6 +5554,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_647: # %cond.load437
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5448,6 +5571,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_648: # %cond.load441
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5464,6 +5588,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_649: # %cond.load445
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5480,6 +5605,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_650: # %cond.load449
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5496,6 +5622,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_651: # %cond.load453
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5512,6 +5639,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_652: # %cond.load457
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5528,6 +5656,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_653: # %cond.load461
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5544,6 +5673,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_654: # %cond.load465
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5560,6 +5690,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_655: # %cond.load469
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5576,6 +5707,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_656: # %cond.load473
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5592,6 +5724,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_657: # %cond.load477
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5608,6 +5741,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_658: # %cond.load481
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5624,6 +5758,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_659: # %cond.load485
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5640,6 +5775,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_660: # %cond.load489
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5656,6 +5792,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_661: # %cond.load493
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5673,6 +5810,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_134
; CHECK-RV32-NEXT: .LBB61_662: # %cond.load505
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a2
; CHECK-RV32-NEXT: li a2, 128
@@ -5688,6 +5826,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_663: # %cond.load509
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5704,6 +5843,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_664: # %cond.load513
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5720,6 +5860,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_665: # %cond.load517
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5736,6 +5877,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_666: # %cond.load521
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5752,6 +5894,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_667: # %cond.load525
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5768,6 +5911,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_668: # %cond.load529
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5784,6 +5928,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_669: # %cond.load533
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5800,6 +5945,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_670: # %cond.load537
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5816,6 +5962,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_671: # %cond.load541
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5832,6 +5979,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_672: # %cond.load545
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5848,6 +5996,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_673: # %cond.load549
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5864,6 +6013,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_674: # %cond.load553
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5880,6 +6030,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_675: # %cond.load557
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5896,6 +6047,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_676: # %cond.load561
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5912,6 +6064,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_677: # %cond.load565
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5928,6 +6081,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_678: # %cond.load569
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5944,6 +6098,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_679: # %cond.load573
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5960,6 +6115,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_680: # %cond.load577
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5976,6 +6132,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_681: # %cond.load581
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5992,6 +6149,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_682: # %cond.load585
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6008,6 +6166,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_683: # %cond.load589
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6024,6 +6183,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_684: # %cond.load593
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6040,6 +6200,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_685: # %cond.load597
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6056,6 +6217,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_686: # %cond.load601
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6072,6 +6234,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_687: # %cond.load605
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6088,6 +6251,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_688: # %cond.load609
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6104,6 +6268,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_689: # %cond.load613
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6120,6 +6285,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_690: # %cond.load617
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6136,6 +6302,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_691: # %cond.load621
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6153,6 +6320,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_168
; CHECK-RV32-NEXT: .LBB61_692: # %cond.load633
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 160
@@ -6168,6 +6336,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_693: # %cond.load637
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6184,6 +6353,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_694: # %cond.load641
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6200,6 +6370,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_695: # %cond.load645
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6216,6 +6387,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_696: # %cond.load649
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6232,6 +6404,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_697: # %cond.load653
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6248,6 +6421,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_698: # %cond.load657
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6264,6 +6438,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_699: # %cond.load661
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6280,6 +6455,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_700: # %cond.load665
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6296,6 +6472,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_701: # %cond.load669
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6312,6 +6489,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_702: # %cond.load673
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6328,6 +6506,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_703: # %cond.load677
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6344,6 +6523,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_704: # %cond.load681
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6360,6 +6540,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_705: # %cond.load685
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6376,6 +6557,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_706: # %cond.load689
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6392,6 +6574,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_707: # %cond.load693
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6408,6 +6591,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_708: # %cond.load697
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6424,6 +6608,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_709: # %cond.load701
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6440,6 +6625,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_710: # %cond.load705
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6456,6 +6642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_711: # %cond.load709
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6472,6 +6659,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_712: # %cond.load713
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6488,6 +6676,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_713: # %cond.load717
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6504,6 +6693,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_714: # %cond.load721
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6520,6 +6710,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_715: # %cond.load725
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6536,6 +6727,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_716: # %cond.load729
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6552,6 +6744,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_717: # %cond.load733
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6568,6 +6761,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_718: # %cond.load737
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6584,6 +6778,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_719: # %cond.load741
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6600,6 +6795,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_720: # %cond.load745
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6616,6 +6812,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_721: # %cond.load749
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6633,6 +6830,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_202
; CHECK-RV32-NEXT: .LBB61_722: # %cond.load761
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 192
@@ -6648,6 +6846,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_723: # %cond.load765
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6664,6 +6863,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_724: # %cond.load769
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6680,6 +6880,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_725: # %cond.load773
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6696,6 +6897,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_726: # %cond.load777
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6712,6 +6914,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_727: # %cond.load781
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6728,6 +6931,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_728: # %cond.load785
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6744,6 +6948,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_729: # %cond.load789
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6760,6 +6965,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_730: # %cond.load793
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6776,6 +6982,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_731: # %cond.load797
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6792,6 +6999,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_732: # %cond.load801
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6808,6 +7016,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_733: # %cond.load805
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6824,6 +7033,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_734: # %cond.load809
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6840,6 +7050,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_735: # %cond.load813
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6856,6 +7067,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_736: # %cond.load817
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6872,6 +7084,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_737: # %cond.load821
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6888,6 +7101,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_738: # %cond.load825
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6904,6 +7118,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_739: # %cond.load829
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6920,6 +7135,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_740: # %cond.load833
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6936,6 +7152,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_741: # %cond.load837
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6952,6 +7169,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_742: # %cond.load841
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6968,6 +7186,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_743: # %cond.load845
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6984,6 +7203,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_744: # %cond.load849
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7000,6 +7220,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_745: # %cond.load853
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7016,6 +7237,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_746: # %cond.load857
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7032,6 +7254,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_747: # %cond.load861
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7048,6 +7271,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_748: # %cond.load865
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7064,6 +7288,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_749: # %cond.load869
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7080,6 +7305,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_750: # %cond.load873
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7096,6 +7322,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_751: # %cond.load877
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7113,6 +7340,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_236
; CHECK-RV32-NEXT: .LBB61_752: # %cond.load889
; CHECK-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 224
@@ -7128,6 +7356,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_753: # %cond.load893
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7144,6 +7373,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_754: # %cond.load897
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7160,6 +7390,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_755: # %cond.load901
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7176,6 +7407,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_756: # %cond.load905
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7192,6 +7424,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_757: # %cond.load909
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7208,6 +7441,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_758: # %cond.load913
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7224,6 +7458,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_759: # %cond.load917
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7240,6 +7475,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_760: # %cond.load921
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7256,6 +7492,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_761: # %cond.load925
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7272,6 +7509,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_762: # %cond.load929
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7288,6 +7526,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_763: # %cond.load933
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7304,6 +7543,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_764: # %cond.load937
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7320,6 +7560,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_765: # %cond.load941
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7336,6 +7577,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_766: # %cond.load945
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7352,6 +7594,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_767: # %cond.load949
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7368,6 +7611,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_768: # %cond.load953
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7384,6 +7628,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_769: # %cond.load957
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7400,6 +7645,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_770: # %cond.load961
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7416,6 +7662,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_771: # %cond.load965
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7432,6 +7679,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_772: # %cond.load969
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7448,6 +7696,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_773: # %cond.load973
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7464,6 +7713,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_774: # %cond.load977
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7480,6 +7730,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_775: # %cond.load981
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7496,6 +7747,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_776: # %cond.load985
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7512,6 +7764,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_777: # %cond.load989
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7528,6 +7781,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_778: # %cond.load993
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7544,6 +7798,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_779: # %cond.load997
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7560,6 +7815,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_780: # %cond.load1001
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7576,6 +7832,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_781: # %cond.load1005
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7593,6 +7850,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_270
; CHECK-RV32-NEXT: .LBB61_782: # %cond.load1017
; CHECK-RV32-NEXT: lbu a2, 0(a0)
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 256
@@ -10999,6 +11257,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_62: # %cond.load241
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -11016,6 +11275,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a1, .LBB61_65
; CHECK-RV64-NEXT: # %bb.64: # %cond.load245
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v9, a1
; CHECK-RV64-NEXT: li a1, 63
@@ -11280,6 +11540,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_128: # %cond.load497
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -11297,6 +11558,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a2, .LBB61_131
; CHECK-RV64-NEXT: # %bb.130: # %cond.load501
; CHECK-RV64-NEXT: lbu a2, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v10, a2
; CHECK-RV64-NEXT: li a2, 127
@@ -11561,6 +11823,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_194: # %cond.load753
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -11578,6 +11841,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a1, .LBB61_197
; CHECK-RV64-NEXT: # %bb.196: # %cond.load757
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a1
; CHECK-RV64-NEXT: li a1, 191
@@ -11842,6 +12106,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_260: # %cond.load1009
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -11859,6 +12124,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a2, .LBB61_263
; CHECK-RV64-NEXT: # %bb.262: # %cond.load1013
; CHECK-RV64-NEXT: lbu a2, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a2
; CHECK-RV64-NEXT: li a2, 255
@@ -12957,6 +13223,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: ret
; CHECK-RV64-NEXT: .LBB61_527: # %cond.load
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v8, a1
@@ -12968,6 +13235,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_2
; CHECK-RV64-NEXT: .LBB61_528: # %cond.load1
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -12981,6 +13249,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_3
; CHECK-RV64-NEXT: .LBB61_529: # %cond.load5
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -12993,6 +13262,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_4
; CHECK-RV64-NEXT: .LBB61_530: # %cond.load9
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13005,6 +13275,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_5
; CHECK-RV64-NEXT: .LBB61_531: # %cond.load13
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 5, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13017,6 +13288,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_6
; CHECK-RV64-NEXT: .LBB61_532: # %cond.load17
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13029,6 +13301,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_7
; CHECK-RV64-NEXT: .LBB61_533: # %cond.load21
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13041,6 +13314,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_8
; CHECK-RV64-NEXT: .LBB61_534: # %cond.load25
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13053,6 +13327,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_9
; CHECK-RV64-NEXT: .LBB61_535: # %cond.load29
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 9, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13065,6 +13340,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_10
; CHECK-RV64-NEXT: .LBB61_536: # %cond.load33
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13077,6 +13353,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_11
; CHECK-RV64-NEXT: .LBB61_537: # %cond.load37
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13089,6 +13366,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_12
; CHECK-RV64-NEXT: .LBB61_538: # %cond.load41
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13101,6 +13379,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_13
; CHECK-RV64-NEXT: .LBB61_539: # %cond.load45
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 13, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13113,6 +13392,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_14
; CHECK-RV64-NEXT: .LBB61_540: # %cond.load49
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13125,6 +13405,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_15
; CHECK-RV64-NEXT: .LBB61_541: # %cond.load53
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13137,6 +13418,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_16
; CHECK-RV64-NEXT: .LBB61_542: # %cond.load57
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13149,6 +13431,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_17
; CHECK-RV64-NEXT: .LBB61_543: # %cond.load61
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 17, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13161,6 +13444,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_18
; CHECK-RV64-NEXT: .LBB61_544: # %cond.load65
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 18, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13173,6 +13457,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_19
; CHECK-RV64-NEXT: .LBB61_545: # %cond.load69
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 19, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13185,6 +13470,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_20
; CHECK-RV64-NEXT: .LBB61_546: # %cond.load73
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 20, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13197,6 +13483,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_21
; CHECK-RV64-NEXT: .LBB61_547: # %cond.load77
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 21, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13209,6 +13496,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_22
; CHECK-RV64-NEXT: .LBB61_548: # %cond.load81
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 22, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13221,6 +13509,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_23
; CHECK-RV64-NEXT: .LBB61_549: # %cond.load85
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 23, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13233,6 +13522,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_24
; CHECK-RV64-NEXT: .LBB61_550: # %cond.load89
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 24, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13245,6 +13535,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_25
; CHECK-RV64-NEXT: .LBB61_551: # %cond.load93
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 25, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13257,6 +13548,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_26
; CHECK-RV64-NEXT: .LBB61_552: # %cond.load97
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 26, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13269,6 +13561,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_27
; CHECK-RV64-NEXT: .LBB61_553: # %cond.load101
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 27, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13281,6 +13574,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_28
; CHECK-RV64-NEXT: .LBB61_554: # %cond.load105
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 28, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13293,6 +13587,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_29
; CHECK-RV64-NEXT: .LBB61_555: # %cond.load109
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 29, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13305,6 +13600,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_30
; CHECK-RV64-NEXT: .LBB61_556: # %cond.load113
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 30, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13317,6 +13613,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_31
; CHECK-RV64-NEXT: .LBB61_557: # %cond.load117
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 31, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13330,6 +13627,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_558: # %cond.load121
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13345,6 +13643,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_559: # %cond.load125
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13361,6 +13660,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_560: # %cond.load129
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13377,6 +13677,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_561: # %cond.load133
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13393,6 +13694,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_562: # %cond.load137
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13409,6 +13711,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_563: # %cond.load141
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13425,6 +13728,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_564: # %cond.load145
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13441,6 +13745,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_565: # %cond.load149
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13457,6 +13762,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_566: # %cond.load153
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13473,6 +13779,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_567: # %cond.load157
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13489,6 +13796,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_568: # %cond.load161
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13505,6 +13813,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_569: # %cond.load165
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13521,6 +13830,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_570: # %cond.load169
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13537,6 +13847,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_571: # %cond.load173
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13553,6 +13864,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_572: # %cond.load177
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13569,6 +13881,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_573: # %cond.load181
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13585,6 +13898,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_574: # %cond.load185
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13601,6 +13915,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_575: # %cond.load189
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13617,6 +13932,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_576: # %cond.load193
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13633,6 +13949,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_577: # %cond.load197
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13649,6 +13966,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_578: # %cond.load201
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13665,6 +13983,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_579: # %cond.load205
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13681,6 +14000,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_580: # %cond.load209
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13697,6 +14017,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_581: # %cond.load213
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13713,6 +14034,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_582: # %cond.load217
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13729,6 +14051,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_583: # %cond.load221
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13745,6 +14068,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_584: # %cond.load225
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13761,6 +14085,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_585: # %cond.load229
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13777,6 +14102,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_586: # %cond.load233
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13793,6 +14119,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_587: # %cond.load237
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13810,6 +14137,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_63
; CHECK-RV64-NEXT: .LBB61_588: # %cond.load249
; CHECK-RV64-NEXT: lbu a2, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v9, a2
; CHECK-RV64-NEXT: li a2, 64
@@ -13825,6 +14153,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_589: # %cond.load253
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13841,6 +14170,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_590: # %cond.load257
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13857,6 +14187,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_591: # %cond.load261
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13873,6 +14204,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_592: # %cond.load265
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13889,6 +14221,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_593: # %cond.load269
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13905,6 +14238,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_594: # %cond.load273
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13921,6 +14255,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_595: # %cond.load277
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13937,6 +14272,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_596: # %cond.load281
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13953,6 +14289,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_597: # %cond.load285
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13969,6 +14306,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_598: # %cond.load289
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -13985,6 +14323,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_599: # %cond.load293
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14001,6 +14340,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_600: # %cond.load297
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14017,6 +14357,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_601: # %cond.load301
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14033,6 +14374,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_602: # %cond.load305
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14049,6 +14391,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_603: # %cond.load309
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14065,6 +14408,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_604: # %cond.load313
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14081,6 +14425,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_605: # %cond.load317
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14097,6 +14442,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_606: # %cond.load321
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14113,6 +14459,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_607: # %cond.load325
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14129,6 +14476,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_608: # %cond.load329
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14145,6 +14493,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_609: # %cond.load333
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14161,6 +14510,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_610: # %cond.load337
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14177,6 +14527,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_611: # %cond.load341
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14193,6 +14544,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_612: # %cond.load345
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14209,6 +14561,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_613: # %cond.load349
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14225,6 +14578,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_614: # %cond.load353
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14241,6 +14595,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_615: # %cond.load357
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14257,6 +14612,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_616: # %cond.load361
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14273,6 +14629,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_617: # %cond.load365
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14289,6 +14646,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_618: # %cond.load369
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14305,6 +14663,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_619: # %cond.load373
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14321,6 +14680,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_620: # %cond.load377
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14337,6 +14697,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_621: # %cond.load381
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14353,6 +14714,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_622: # %cond.load385
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14369,6 +14731,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_623: # %cond.load389
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14385,6 +14748,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_624: # %cond.load393
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14401,6 +14765,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_625: # %cond.load397
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14417,6 +14782,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_626: # %cond.load401
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14433,6 +14799,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_627: # %cond.load405
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14449,6 +14816,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_628: # %cond.load409
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14465,6 +14833,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_629: # %cond.load413
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14481,6 +14850,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_630: # %cond.load417
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14497,6 +14867,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_631: # %cond.load421
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14513,6 +14884,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_632: # %cond.load425
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14529,6 +14901,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_633: # %cond.load429
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14545,6 +14918,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_634: # %cond.load433
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14561,6 +14935,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_635: # %cond.load437
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14577,6 +14952,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_636: # %cond.load441
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14593,6 +14969,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_637: # %cond.load445
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14609,6 +14986,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_638: # %cond.load449
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14625,6 +15003,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_639: # %cond.load453
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14641,6 +15020,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_640: # %cond.load457
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14657,6 +15037,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_641: # %cond.load461
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14673,6 +15054,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_642: # %cond.load465
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14689,6 +15071,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_643: # %cond.load469
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14705,6 +15088,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_644: # %cond.load473
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14721,6 +15105,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_645: # %cond.load477
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14737,6 +15122,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_646: # %cond.load481
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14753,6 +15139,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_647: # %cond.load485
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14769,6 +15156,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_648: # %cond.load489
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14785,6 +15173,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_649: # %cond.load493
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14802,6 +15191,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_129
; CHECK-RV64-NEXT: .LBB61_650: # %cond.load505
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v10, a1
; CHECK-RV64-NEXT: li a1, 128
@@ -14817,6 +15207,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_651: # %cond.load509
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14833,6 +15224,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_652: # %cond.load513
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14849,6 +15241,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_653: # %cond.load517
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14865,6 +15258,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_654: # %cond.load521
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14881,6 +15275,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_655: # %cond.load525
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14897,6 +15292,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_656: # %cond.load529
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14913,6 +15309,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_657: # %cond.load533
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14929,6 +15326,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_658: # %cond.load537
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14945,6 +15343,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_659: # %cond.load541
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14961,6 +15360,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_660: # %cond.load545
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14977,6 +15377,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_661: # %cond.load549
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -14993,6 +15394,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_662: # %cond.load553
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15009,6 +15411,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_663: # %cond.load557
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15025,6 +15428,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_664: # %cond.load561
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15041,6 +15445,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_665: # %cond.load565
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15057,6 +15462,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_666: # %cond.load569
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15073,6 +15479,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_667: # %cond.load573
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15089,6 +15496,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_668: # %cond.load577
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15105,6 +15513,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_669: # %cond.load581
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15121,6 +15530,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_670: # %cond.load585
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15137,6 +15547,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_671: # %cond.load589
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15153,6 +15564,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_672: # %cond.load593
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15169,6 +15581,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_673: # %cond.load597
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15185,6 +15598,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_674: # %cond.load601
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15201,6 +15615,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_675: # %cond.load605
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15217,6 +15632,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_676: # %cond.load609
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15233,6 +15649,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_677: # %cond.load613
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15249,6 +15666,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_678: # %cond.load617
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15265,6 +15683,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_679: # %cond.load621
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15281,6 +15700,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_680: # %cond.load625
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15297,6 +15717,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_681: # %cond.load629
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15313,6 +15734,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_682: # %cond.load633
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15329,6 +15751,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_683: # %cond.load637
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15345,6 +15768,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_684: # %cond.load641
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15361,6 +15785,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_685: # %cond.load645
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15377,6 +15802,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_686: # %cond.load649
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15393,6 +15819,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_687: # %cond.load653
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15409,6 +15836,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_688: # %cond.load657
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15425,6 +15853,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_689: # %cond.load661
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15441,6 +15870,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_690: # %cond.load665
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15457,6 +15887,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_691: # %cond.load669
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15473,6 +15904,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_692: # %cond.load673
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15489,6 +15921,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_693: # %cond.load677
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15505,6 +15938,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_694: # %cond.load681
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15521,6 +15955,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_695: # %cond.load685
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15537,6 +15972,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_696: # %cond.load689
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15553,6 +15989,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_697: # %cond.load693
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15569,6 +16006,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_698: # %cond.load697
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15585,6 +16023,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_699: # %cond.load701
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15601,6 +16040,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_700: # %cond.load705
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15617,6 +16057,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_701: # %cond.load709
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15633,6 +16074,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_702: # %cond.load713
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15649,6 +16091,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_703: # %cond.load717
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15665,6 +16108,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_704: # %cond.load721
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15681,6 +16125,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_705: # %cond.load725
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15697,6 +16142,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_706: # %cond.load729
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15713,6 +16159,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_707: # %cond.load733
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15729,6 +16176,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_708: # %cond.load737
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15745,6 +16193,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_709: # %cond.load741
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15761,6 +16210,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_710: # %cond.load745
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15777,6 +16227,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_711: # %cond.load749
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15794,6 +16245,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_195
; CHECK-RV64-NEXT: .LBB61_712: # %cond.load761
; CHECK-RV64-NEXT: lbu a2, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a2
; CHECK-RV64-NEXT: li a2, 192
@@ -15809,6 +16261,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_713: # %cond.load765
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15825,6 +16278,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_714: # %cond.load769
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15841,6 +16295,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_715: # %cond.load773
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15857,6 +16312,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_716: # %cond.load777
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15873,6 +16329,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_717: # %cond.load781
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15889,6 +16346,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_718: # %cond.load785
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15905,6 +16363,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_719: # %cond.load789
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15921,6 +16380,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_720: # %cond.load793
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15937,6 +16397,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_721: # %cond.load797
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15953,6 +16414,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_722: # %cond.load801
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15969,6 +16431,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_723: # %cond.load805
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -15985,6 +16448,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_724: # %cond.load809
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16001,6 +16465,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_725: # %cond.load813
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16017,6 +16482,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_726: # %cond.load817
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16033,6 +16499,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_727: # %cond.load821
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16049,6 +16516,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_728: # %cond.load825
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16065,6 +16533,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_729: # %cond.load829
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16081,6 +16550,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_730: # %cond.load833
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16097,6 +16567,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_731: # %cond.load837
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16113,6 +16584,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_732: # %cond.load841
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16129,6 +16601,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_733: # %cond.load845
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16145,6 +16618,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_734: # %cond.load849
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16161,6 +16635,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_735: # %cond.load853
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16177,6 +16652,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_736: # %cond.load857
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16193,6 +16669,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_737: # %cond.load861
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16209,6 +16686,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_738: # %cond.load865
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16225,6 +16703,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_739: # %cond.load869
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16241,6 +16720,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_740: # %cond.load873
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16257,6 +16737,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_741: # %cond.load877
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16273,6 +16754,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_742: # %cond.load881
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16289,6 +16771,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_743: # %cond.load885
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16305,6 +16788,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_744: # %cond.load889
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16321,6 +16805,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_745: # %cond.load893
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16337,6 +16822,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_746: # %cond.load897
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16353,6 +16839,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_747: # %cond.load901
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16369,6 +16856,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_748: # %cond.load905
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16385,6 +16873,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_749: # %cond.load909
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16401,6 +16890,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_750: # %cond.load913
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16417,6 +16907,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_751: # %cond.load917
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16433,6 +16924,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_752: # %cond.load921
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16449,6 +16941,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_753: # %cond.load925
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16465,6 +16958,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_754: # %cond.load929
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16481,6 +16975,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_755: # %cond.load933
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16497,6 +16992,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_756: # %cond.load937
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16513,6 +17009,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_757: # %cond.load941
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16529,6 +17026,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_758: # %cond.load945
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16545,6 +17043,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_759: # %cond.load949
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16561,6 +17060,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_760: # %cond.load953
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16577,6 +17077,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_761: # %cond.load957
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16593,6 +17094,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_762: # %cond.load961
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16609,6 +17111,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_763: # %cond.load965
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16625,6 +17128,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_764: # %cond.load969
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16641,6 +17145,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_765: # %cond.load973
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16657,6 +17162,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_766: # %cond.load977
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16673,6 +17179,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_767: # %cond.load981
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16689,6 +17196,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_768: # %cond.load985
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16705,6 +17213,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_769: # %cond.load989
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16721,6 +17230,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_770: # %cond.load993
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16737,6 +17247,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_771: # %cond.load997
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16753,6 +17264,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_772: # %cond.load1001
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16769,6 +17281,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_773: # %cond.load1005
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16786,6 +17299,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_261
; CHECK-RV64-NEXT: .LBB61_774: # %cond.load1017
; CHECK-RV64-NEXT: lbu a1, 0(a0)
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a1
; CHECK-RV64-NEXT: li a1, 256
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index 869478a1efa78d..cf3bf00e307b8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -13,6 +13,7 @@ define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
@@ -30,6 +31,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
@@ -39,6 +41,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
@@ -48,6 +51,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_6:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
@@ -65,6 +69,7 @@ define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec)
define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -82,6 +87,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
@@ -91,6 +97,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -100,6 +107,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_12:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
@@ -117,6 +125,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
@@ -126,6 +135,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
@@ -135,6 +145,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_6:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
@@ -144,6 +155,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -153,6 +165,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_10:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
@@ -162,6 +175,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_12:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v14
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
@@ -171,6 +185,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_14:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v15
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
@@ -224,6 +239,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_15(<vscale x 16 x i32> %vec)
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
@@ -287,6 +303,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_8(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
@@ -357,6 +374,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_2(<vscale x 16 x half> %vec
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_4(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
@@ -504,6 +522,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv16bf16_2(<vscale x 16 x bfloat
define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv16bf16_4(<vscale x 16 x bfloat> %vec) {
; CHECK-LABEL: extract_nxv2bf16_nxv16bf16_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv16bf16(<vscale x 16 x bfloat> %vec, i64 4)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index ce83e2d8a62206..0f6eb79616c1d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -16,6 +16,7 @@ define <512 x i8> @single_source(<512 x i8> %a) {
; CHECK-NEXT: addi s0, sp, 1536
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: andi sp, sp, -512
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: addi a1, sp, 512
@@ -104,6 +105,7 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: addi s0, sp, 1536
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: andi sp, sp, -512
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: addi a1, sp, 512
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 3eb5d36b4896a7..8a1a5931771373 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -1659,6 +1659,7 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v8
; RV32-NEXT: lui a2, 1044480
; RV32-NEXT: lui a3, 61681
@@ -2055,6 +2056,7 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v8
; RV32-NEXT: lui a2, 1044480
; RV32-NEXT: lui a3, 61681
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index ee953a66a004f3..d9071b3da03caa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -180,6 +180,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 2
; CHECK-NEXT: vmv8r.v v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 73e148edbe2d67..d2d996ac6e3ea8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -180,6 +180,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 2
; CHECK-NEXT: vmv8r.v v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 511242aa677c2a..0e4b02d1690628 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -194,6 +194,7 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_ceil_v8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -261,6 +262,7 @@ declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -280,6 +282,7 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
;
; ZVFHMIN-LABEL: vp_ceil_v16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -431,6 +434,7 @@ declare <8 x float> @llvm.vp.ceil.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -475,6 +479,7 @@ declare <16 x float> @llvm.vp.ceil.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -561,6 +566,7 @@ declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -605,6 +611,7 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -649,6 +656,7 @@ declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -693,6 +701,7 @@ declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -743,6 +752,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -757,6 +767,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index 5e73e6df9170c2..46f74b59b96586 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1796,6 +1796,7 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v16
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: lui a2, 209715
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 02e99ea513e69b..ab49c55b79d6e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -194,6 +194,7 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_floor_v8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -261,6 +262,7 @@ declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -280,6 +282,7 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_floor_v16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -431,6 +434,7 @@ declare <8 x float> @llvm.vp.floor.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -475,6 +479,7 @@ declare <16 x float> @llvm.vp.floor.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -561,6 +566,7 @@ declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -605,6 +611,7 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -649,6 +656,7 @@ declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -693,6 +701,7 @@ declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -743,6 +752,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -757,6 +767,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index 9a3838d57a0b07..9f531762d256c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -13,6 +13,7 @@ declare <2 x half> @llvm.vp.maximum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v2f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -26,6 +27,7 @@ define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v2f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -83,6 +85,7 @@ declare <4 x half> @llvm.vp.maximum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v4f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -96,6 +99,7 @@ define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -153,6 +157,7 @@ declare <8 x half> @llvm.vp.maximum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -166,6 +171,7 @@ define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -225,6 +231,7 @@ declare <16 x half> @llvm.vp.maximum.v16f16(<16 x half>, <16 x half>, <16 x i1>,
define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -240,6 +247,7 @@ define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
;
; ZVFHMIN-LABEL: vfmax_vv_v16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -299,6 +307,7 @@ declare <2 x float> @llvm.vp.maximum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i
define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -333,6 +342,7 @@ declare <4 x float> @llvm.vp.maximum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i
define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -367,6 +377,7 @@ declare <8 x float> @llvm.vp.maximum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i
define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -403,6 +414,7 @@ declare <16 x float> @llvm.vp.maximum.v16f32(<16 x float>, <16 x float>, <16 x i
define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -439,6 +451,7 @@ declare <2 x double> @llvm.vp.maximum.v2f64(<2 x double>, <2 x double>, <2 x i1>
define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -473,6 +486,7 @@ declare <4 x double> @llvm.vp.maximum.v4f64(<4 x double>, <4 x double>, <4 x i1>
define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -509,6 +523,7 @@ declare <8 x double> @llvm.vp.maximum.v8f64(<8 x double>, <8 x double>, <8 x i1>
define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -551,6 +566,7 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -602,6 +618,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
@@ -631,6 +648,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index 4a7f888fbced4f..e10f4a88feceef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -13,6 +13,7 @@ declare <2 x half> @llvm.vp.minimum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -26,6 +27,7 @@ define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -83,6 +85,7 @@ declare <4 x half> @llvm.vp.minimum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -96,6 +99,7 @@ define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -153,6 +157,7 @@ declare <8 x half> @llvm.vp.minimum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -166,6 +171,7 @@ define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -225,6 +231,7 @@ declare <16 x half> @llvm.vp.minimum.v16f16(<16 x half>, <16 x half>, <16 x i1>,
define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -240,6 +247,7 @@ define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -299,6 +307,7 @@ declare <2 x float> @llvm.vp.minimum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i
define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -333,6 +342,7 @@ declare <4 x float> @llvm.vp.minimum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i
define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -367,6 +377,7 @@ declare <8 x float> @llvm.vp.minimum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i
define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -403,6 +414,7 @@ declare <16 x float> @llvm.vp.minimum.v16f32(<16 x float>, <16 x float>, <16 x i
define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -439,6 +451,7 @@ declare <2 x double> @llvm.vp.minimum.v2f64(<2 x double>, <2 x double>, <2 x i1>
define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -473,6 +486,7 @@ declare <4 x double> @llvm.vp.minimum.v4f64(<4 x double>, <4 x double>, <4 x i1>
define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -509,6 +523,7 @@ declare <8 x double> @llvm.vp.minimum.v8f64(<8 x double>, <8 x double>, <8 x i1>
define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -551,6 +566,7 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -602,6 +618,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
@@ -631,6 +648,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index e4b8e9debad271..72397f64275d6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -38,6 +38,7 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; V128-LABEL: interleave_v2f64:
; V128: # %bb.0:
+; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; V128-NEXT: vmv1r.v v12, v9
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vid.v v9
@@ -242,6 +243,7 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: sub sp, sp, a0
; V128-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; V128-NEXT: vmv8r.v v24, v16
; V128-NEXT: vmv8r.v v16, v8
; V128-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
index e64c7c87132eee..88994afc1d7c93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
@@ -97,6 +97,7 @@ declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i3
define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_v32f32_v32f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
index a68dc11f3d21e7..d929be2f2371aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
@@ -712,6 +712,7 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 1fbc8dfd688c4b..9febb7db0ad08a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -133,6 +133,7 @@ define <vscale x 2 x i32> @insert_nxv8i32_v4i32_0(<vscale x 2 x i32> %vec, <4 x
;
; VLS-LABEL: insert_nxv8i32_v4i32_0:
; VLS: # %bb.0:
+; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v8, v9
; VLS-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> %vec, <4 x i32> %subvec, i64 0)
@@ -143,6 +144,7 @@ define <vscale x 2 x i32> @insert_nxv8i32_v4i32_0(<vscale x 2 x i32> %vec, <4 x
define <4 x i32> @insert_v4i32_v4i32_0(<4 x i32> %vec, <4 x i32> %subvec) {
; CHECK-LABEL: insert_v4i32_v4i32_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vector.insert.v4i32.v4i32(<4 x i32> %vec, <4 x i32> %subvec, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index 66af5718fb9dc5..f27497c8ac43b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -51,6 +51,7 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) {
define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; V128-LABEL: interleave_v2i64:
; V128: # %bb.0:
+; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; V128-NEXT: vmv1r.v v12, v9
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vid.v v9
@@ -411,6 +412,7 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: sub sp, sp, a0
; V128-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; V128-NEXT: vmv8r.v v24, v16
; V128-NEXT: vmv8r.v v16, v8
; V128-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 6cc3f7e76797bd..d3bc60d39ed1cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -556,11 +556,13 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
define <4 x i8> @mgather_falsemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i8:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i8:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -733,13 +735,13 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB12_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB12_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB12_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB12_15
; RV64ZVE32F-NEXT: .LBB12_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB12_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB12_16
; RV64ZVE32F-NEXT: .LBB12_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB12_9
@@ -756,14 +758,31 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB12_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB12_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB12_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB12_16
-; RV64ZVE32F-NEXT: .LBB12_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB12_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB12_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB12_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB12_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
@@ -772,7 +791,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB12_6
-; RV64ZVE32F-NEXT: .LBB12_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB12_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -783,7 +802,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB12_7
-; RV64ZVE32F-NEXT: .LBB12_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB12_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -793,26 +812,6 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB12_8
; RV64ZVE32F-NEXT: j .LBB12_9
-; RV64ZVE32F-NEXT: .LBB12_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB12_11
-; RV64ZVE32F-NEXT: .LBB12_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lbu a0, 0(a0)
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <8 x i8> %idxs
%v = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> %m, <8 x i8> %passthru)
ret <8 x i8> %v
@@ -1253,11 +1252,13 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
define <4 x i16> @mgather_falsemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i16:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i16:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -1435,13 +1436,13 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB23_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB23_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB23_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB23_15
; RV64ZVE32F-NEXT: .LBB23_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB23_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB23_16
; RV64ZVE32F-NEXT: .LBB23_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB23_9
@@ -1460,14 +1461,35 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB23_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB23_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB23_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB23_16
-; RV64ZVE32F-NEXT: .LBB23_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB23_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB23_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB23_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB23_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -1478,7 +1500,7 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB23_6
-; RV64ZVE32F-NEXT: .LBB23_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB23_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -1491,7 +1513,7 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB23_7
-; RV64ZVE32F-NEXT: .LBB23_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB23_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -1504,30 +1526,6 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB23_8
; RV64ZVE32F-NEXT: j .LBB23_9
-; RV64ZVE32F-NEXT: .LBB23_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB23_11
-; RV64ZVE32F-NEXT: .LBB23_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i8> %idxs
%v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru)
ret <8 x i16> %v
@@ -1587,13 +1585,13 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB24_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB24_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB24_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB24_15
; RV64ZVE32F-NEXT: .LBB24_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB24_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB24_16
; RV64ZVE32F-NEXT: .LBB24_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB24_9
@@ -1612,14 +1610,35 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB24_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB24_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB24_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB24_16
-; RV64ZVE32F-NEXT: .LBB24_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB24_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB24_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB24_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB24_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -1630,7 +1649,7 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB24_6
-; RV64ZVE32F-NEXT: .LBB24_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB24_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -1643,7 +1662,7 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB24_7
-; RV64ZVE32F-NEXT: .LBB24_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB24_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -1656,30 +1675,6 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB24_8
; RV64ZVE32F-NEXT: j .LBB24_9
-; RV64ZVE32F-NEXT: .LBB24_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB24_11
-; RV64ZVE32F-NEXT: .LBB24_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i16>
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %eidxs
%v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru)
@@ -1740,13 +1735,13 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB25_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB25_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB25_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB25_15
; RV64ZVE32F-NEXT: .LBB25_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB25_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB25_16
; RV64ZVE32F-NEXT: .LBB25_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB25_9
@@ -1766,14 +1761,37 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB25_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB25_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB25_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB25_16
-; RV64ZVE32F-NEXT: .LBB25_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB25_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB25_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB25_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -1785,7 +1803,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB25_6
-; RV64ZVE32F-NEXT: .LBB25_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB25_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -1799,7 +1817,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB25_7
-; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB25_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: andi a2, a2, 255
@@ -1813,32 +1831,6 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB25_8
; RV64ZVE32F-NEXT: j .LBB25_9
-; RV64ZVE32F-NEXT: .LBB25_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB25_11
-; RV64ZVE32F-NEXT: .LBB25_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i16>
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %eidxs
%v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru)
@@ -1896,13 +1888,13 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB26_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB26_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB26_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB26_15
; RV64ZVE32F-NEXT: .LBB26_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB26_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB26_16
; RV64ZVE32F-NEXT: .LBB26_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB26_9
@@ -1920,14 +1912,33 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB26_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB26_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB26_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB26_16
-; RV64ZVE32F-NEXT: .LBB26_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB26_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB26_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB26_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB26_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -1937,7 +1948,7 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB26_6
-; RV64ZVE32F-NEXT: .LBB26_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB26_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -1949,7 +1960,7 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB26_7
-; RV64ZVE32F-NEXT: .LBB26_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB26_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -1960,28 +1971,6 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB26_8
; RV64ZVE32F-NEXT: j .LBB26_9
-; RV64ZVE32F-NEXT: .LBB26_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB26_11
-; RV64ZVE32F-NEXT: .LBB26_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %idxs
%v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru)
ret <8 x i16> %v
@@ -2311,11 +2300,13 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
define <4 x i32> @mgather_falsemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i32:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i32:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -2492,13 +2483,13 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB35_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB35_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB35_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB35_15
; RV64ZVE32F-NEXT: .LBB35_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB35_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB35_16
; RV64ZVE32F-NEXT: .LBB35_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB35_9
@@ -2517,14 +2508,35 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB35_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB35_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lw a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB35_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB35_16
-; RV64ZVE32F-NEXT: .LBB35_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB35_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB35_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB35_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB35_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -2535,7 +2547,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB35_6
-; RV64ZVE32F-NEXT: .LBB35_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB35_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -2548,7 +2560,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB35_7
-; RV64ZVE32F-NEXT: .LBB35_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB35_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -2561,30 +2573,6 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB35_8
; RV64ZVE32F-NEXT: j .LBB35_9
-; RV64ZVE32F-NEXT: .LBB35_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lw a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB35_11
-; RV64ZVE32F-NEXT: .LBB35_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i8> %idxs
%v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
ret <8 x i32> %v
@@ -2643,13 +2631,13 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB36_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB36_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB36_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB36_15
; RV64ZVE32F-NEXT: .LBB36_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB36_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB36_16
; RV64ZVE32F-NEXT: .LBB36_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB36_9
@@ -2668,14 +2656,35 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB36_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB36_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lw a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB36_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB36_16
-; RV64ZVE32F-NEXT: .LBB36_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB36_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB36_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB36_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB36_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -2686,7 +2695,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB36_6
-; RV64ZVE32F-NEXT: .LBB36_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB36_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -2699,7 +2708,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB36_7
-; RV64ZVE32F-NEXT: .LBB36_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB36_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -2712,30 +2721,6 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB36_8
; RV64ZVE32F-NEXT: j .LBB36_9
-; RV64ZVE32F-NEXT: .LBB36_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lw a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB36_11
-; RV64ZVE32F-NEXT: .LBB36_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs
%v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
@@ -2798,13 +2783,13 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB37_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB37_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB37_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB37_15
; RV64ZVE32F-NEXT: .LBB37_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB37_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB37_16
; RV64ZVE32F-NEXT: .LBB37_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB37_9
@@ -2824,14 +2809,37 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB37_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB37_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lw a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB37_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB37_16
-; RV64ZVE32F-NEXT: .LBB37_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB37_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB37_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB37_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -2843,7 +2851,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB37_6
-; RV64ZVE32F-NEXT: .LBB37_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB37_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -2857,7 +2865,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB37_7
-; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB37_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: andi a2, a2, 255
@@ -2871,32 +2879,6 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB37_8
; RV64ZVE32F-NEXT: j .LBB37_9
-; RV64ZVE32F-NEXT: .LBB37_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lw a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB37_11
-; RV64ZVE32F-NEXT: .LBB37_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs
%v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
@@ -2957,13 +2939,13 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB38_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB38_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB38_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB38_15
; RV64ZVE32F-NEXT: .LBB38_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB38_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB38_16
; RV64ZVE32F-NEXT: .LBB38_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB38_9
@@ -2982,14 +2964,35 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB38_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB38_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lw a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB38_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB38_16
-; RV64ZVE32F-NEXT: .LBB38_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB38_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB38_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB38_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB38_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -3000,7 +3003,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB38_6
-; RV64ZVE32F-NEXT: .LBB38_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB38_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -3013,7 +3016,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB38_7
-; RV64ZVE32F-NEXT: .LBB38_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB38_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -3026,30 +3029,6 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB38_8
; RV64ZVE32F-NEXT: j .LBB38_9
-; RV64ZVE32F-NEXT: .LBB38_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lw a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB38_11
-; RV64ZVE32F-NEXT: .LBB38_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i16> %idxs
%v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
ret <8 x i32> %v
@@ -3109,13 +3088,13 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB39_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB39_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB39_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB39_15
; RV64ZVE32F-NEXT: .LBB39_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB39_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB39_16
; RV64ZVE32F-NEXT: .LBB39_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB39_9
@@ -3134,14 +3113,35 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB39_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB39_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lw a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB39_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB39_16
-; RV64ZVE32F-NEXT: .LBB39_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB39_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB39_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB39_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB39_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -3152,7 +3152,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB39_6
-; RV64ZVE32F-NEXT: .LBB39_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB39_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -3165,7 +3165,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB39_7
-; RV64ZVE32F-NEXT: .LBB39_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB39_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -3178,30 +3178,6 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB39_8
; RV64ZVE32F-NEXT: j .LBB39_9
-; RV64ZVE32F-NEXT: .LBB39_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lw a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB39_11
-; RV64ZVE32F-NEXT: .LBB39_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs
%v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
@@ -3265,13 +3241,13 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: andi a3, a2, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a3, .LBB40_12
+; RV64ZVE32F-NEXT: bnez a3, .LBB40_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a3, a2, 8
-; RV64ZVE32F-NEXT: bnez a3, .LBB40_13
+; RV64ZVE32F-NEXT: bnez a3, .LBB40_15
; RV64ZVE32F-NEXT: .LBB40_6: # %else8
; RV64ZVE32F-NEXT: andi a3, a2, 16
-; RV64ZVE32F-NEXT: bnez a3, .LBB40_14
+; RV64ZVE32F-NEXT: bnez a3, .LBB40_16
; RV64ZVE32F-NEXT: .LBB40_7: # %else11
; RV64ZVE32F-NEXT: andi a3, a2, 32
; RV64ZVE32F-NEXT: beqz a3, .LBB40_9
@@ -3291,14 +3267,37 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: andi a3, a2, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a3, .LBB40_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a3, .LBB40_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a3, v8
+; RV64ZVE32F-NEXT: and a3, a3, a1
+; RV64ZVE32F-NEXT: slli a3, a3, 2
+; RV64ZVE32F-NEXT: add a3, a0, a3
+; RV64ZVE32F-NEXT: lw a3, 0(a3)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v12, a3
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB40_11: # %else17
; RV64ZVE32F-NEXT: andi a2, a2, -128
-; RV64ZVE32F-NEXT: bnez a2, .LBB40_16
-; RV64ZVE32F-NEXT: .LBB40_11: # %else20
+; RV64ZVE32F-NEXT: beqz a2, .LBB40_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: and a1, a2, a1
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB40_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB40_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB40_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
@@ -3310,7 +3309,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a3, a2, 8
; RV64ZVE32F-NEXT: beqz a3, .LBB40_6
-; RV64ZVE32F-NEXT: .LBB40_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB40_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
@@ -3324,7 +3323,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a3, a2, 16
; RV64ZVE32F-NEXT: beqz a3, .LBB40_7
-; RV64ZVE32F-NEXT: .LBB40_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB40_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a3, v9
; RV64ZVE32F-NEXT: and a3, a3, a1
@@ -3338,32 +3337,6 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: andi a3, a2, 32
; RV64ZVE32F-NEXT: bnez a3, .LBB40_8
; RV64ZVE32F-NEXT: j .LBB40_9
-; RV64ZVE32F-NEXT: .LBB40_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: and a3, a3, a1
-; RV64ZVE32F-NEXT: slli a3, a3, 2
-; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: lw a3, 0(a3)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v12, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a2, a2, -128
-; RV64ZVE32F-NEXT: beqz a2, .LBB40_11
-; RV64ZVE32F-NEXT: .LBB40_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: and a1, a2, a1
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %eidxs
%v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
@@ -3420,13 +3393,13 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB41_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB41_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB41_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB41_15
; RV64ZVE32F-NEXT: .LBB41_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB41_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB41_16
; RV64ZVE32F-NEXT: .LBB41_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB41_9
@@ -3444,35 +3417,54 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB41_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB41_16
-; RV64ZVE32F-NEXT: .LBB41_11: # %else20
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB41_12: # %cond.load4
+; RV64ZVE32F-NEXT: beqz a2, .LBB41_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
-; RV64ZVE32F-NEXT: vmv.s.x v9, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 2
-; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: beqz a2, .LBB41_6
-; RV64ZVE32F-NEXT: .LBB41_13: # %cond.load7
-; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB41_11: # %else17
+; RV64ZVE32F-NEXT: andi a1, a1, -128
+; RV64ZVE32F-NEXT: beqz a1, .LBB41_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB41_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vmv2r.v v8, v10
+; RV64ZVE32F-NEXT: ret
+; RV64ZVE32F-NEXT: .LBB41_14: # %cond.load4
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lw a2, 0(a2)
+; RV64ZVE32F-NEXT: vmv.s.x v9, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 2
+; RV64ZVE32F-NEXT: andi a2, a1, 8
+; RV64ZVE32F-NEXT: beqz a2, .LBB41_6
+; RV64ZVE32F-NEXT: .LBB41_15: # %cond.load7
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB41_7
-; RV64ZVE32F-NEXT: .LBB41_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB41_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -3483,28 +3475,6 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB41_8
; RV64ZVE32F-NEXT: j .LBB41_9
-; RV64ZVE32F-NEXT: .LBB41_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lw a2, 0(a2)
-; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB41_11
-; RV64ZVE32F-NEXT: .LBB41_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %idxs
%v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
ret <8 x i32> %v
@@ -3822,11 +3792,13 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
define <4 x i64> @mgather_falsemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
; RV32V-LABEL: mgather_falsemask_v4i64:
; RV32V: # %bb.0:
+; RV32V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32V-NEXT: vmv2r.v v8, v10
; RV32V-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i64:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv2r.v v8, v10
; RV64V-NEXT: ret
;
@@ -7113,11 +7085,13 @@ define <4 x bfloat> @mgather_truemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %pass
define <4 x bfloat> @mgather_falsemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %passthru) {
; RV32-LABEL: mgather_falsemask_v4bf16:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4bf16:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -7295,13 +7269,13 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB64_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB64_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB64_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB64_15
; RV64ZVE32F-NEXT: .LBB64_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB64_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB64_16
; RV64ZVE32F-NEXT: .LBB64_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB64_9
@@ -7320,14 +7294,35 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB64_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB64_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB64_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB64_16
-; RV64ZVE32F-NEXT: .LBB64_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB64_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB64_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB64_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB64_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -7338,7 +7333,7 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB64_6
-; RV64ZVE32F-NEXT: .LBB64_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB64_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -7351,7 +7346,7 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB64_7
-; RV64ZVE32F-NEXT: .LBB64_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB64_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -7364,30 +7359,6 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB64_8
; RV64ZVE32F-NEXT: j .LBB64_9
-; RV64ZVE32F-NEXT: .LBB64_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB64_11
-; RV64ZVE32F-NEXT: .LBB64_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i8> %idxs
%v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru)
ret <8 x bfloat> %v
@@ -7447,13 +7418,13 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB65_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB65_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB65_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB65_15
; RV64ZVE32F-NEXT: .LBB65_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB65_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB65_16
; RV64ZVE32F-NEXT: .LBB65_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB65_9
@@ -7472,14 +7443,35 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB65_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB65_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB65_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB65_16
-; RV64ZVE32F-NEXT: .LBB65_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB65_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB65_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB65_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB65_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -7490,7 +7482,7 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB65_6
-; RV64ZVE32F-NEXT: .LBB65_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB65_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -7503,7 +7495,7 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB65_7
-; RV64ZVE32F-NEXT: .LBB65_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB65_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -7516,30 +7508,6 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB65_8
; RV64ZVE32F-NEXT: j .LBB65_9
-; RV64ZVE32F-NEXT: .LBB65_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB65_11
-; RV64ZVE32F-NEXT: .LBB65_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i16>
%ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i16> %eidxs
%v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru)
@@ -7600,13 +7568,13 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB66_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB66_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB66_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB66_15
; RV64ZVE32F-NEXT: .LBB66_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB66_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB66_16
; RV64ZVE32F-NEXT: .LBB66_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB66_9
@@ -7626,14 +7594,37 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB66_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB66_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB66_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB66_16
-; RV64ZVE32F-NEXT: .LBB66_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB66_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB66_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB66_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -7645,7 +7636,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB66_6
-; RV64ZVE32F-NEXT: .LBB66_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB66_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -7659,7 +7650,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB66_7
-; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB66_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: andi a2, a2, 255
@@ -7673,32 +7664,6 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB66_8
; RV64ZVE32F-NEXT: j .LBB66_9
-; RV64ZVE32F-NEXT: .LBB66_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB66_11
-; RV64ZVE32F-NEXT: .LBB66_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i16>
%ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i16> %eidxs
%v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru)
@@ -7756,13 +7721,13 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB67_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB67_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB67_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB67_15
; RV64ZVE32F-NEXT: .LBB67_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB67_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB67_16
; RV64ZVE32F-NEXT: .LBB67_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB67_9
@@ -7780,14 +7745,33 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB67_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB67_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 1
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-NEXT: .LBB67_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB67_16
-; RV64ZVE32F-NEXT: .LBB67_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB67_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 1
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-NEXT: .LBB67_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB67_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB67_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -7797,7 +7781,7 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB67_6
-; RV64ZVE32F-NEXT: .LBB67_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB67_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -7809,7 +7793,7 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB67_7
-; RV64ZVE32F-NEXT: .LBB67_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB67_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
@@ -7820,28 +7804,6 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB67_8
; RV64ZVE32F-NEXT: j .LBB67_9
-; RV64ZVE32F-NEXT: .LBB67_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 1
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB67_11
-; RV64ZVE32F-NEXT: .LBB67_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 1
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds bfloat, ptr %base, <8 x i16> %idxs
%v = call <8 x bfloat> @llvm.masked.gather.v8bf16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x bfloat> %passthru)
ret <8 x bfloat> %v
@@ -8135,11 +8097,13 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
define <4 x half> @mgather_falsemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_v4f16:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f16:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -8410,13 +8374,13 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_12
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_14
; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_13
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_15
; RV64ZVE32F-ZVFH-NEXT: .LBB74_6: # %else8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_14
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_16
; RV64ZVE32F-ZVFH-NEXT: .LBB74_7: # %else11
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_9
@@ -8435,14 +8399,35 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_15
-; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_11
+; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFH-NEXT: .LBB74_11: # %else17
; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB74_16
-; RV64ZVE32F-ZVFH-NEXT: .LBB74_11: # %else20
+; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB74_13
+; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFH-NEXT: .LBB74_13: # %else20
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
-; RV64ZVE32F-ZVFH-NEXT: .LBB74_12: # %cond.load4
+; RV64ZVE32F-ZVFH-NEXT: .LBB74_14: # %cond.load4
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
@@ -8453,7 +8438,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_6
-; RV64ZVE32F-ZVFH-NEXT: .LBB74_13: # %cond.load7
+; RV64ZVE32F-ZVFH-NEXT: .LBB74_15: # %cond.load7
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
@@ -8466,7 +8451,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB74_7
-; RV64ZVE32F-ZVFH-NEXT: .LBB74_14: # %cond.load10
+; RV64ZVE32F-ZVFH-NEXT: .LBB74_16: # %cond.load10
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
@@ -8479,30 +8464,6 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB74_8
; RV64ZVE32F-ZVFH-NEXT: j .LBB74_9
-; RV64ZVE32F-ZVFH-NEXT: .LBB74_15: # %cond.load16
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB74_11
-; RV64ZVE32F-ZVFH-NEXT: .LBB74_16: # %cond.load19
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFH-NEXT: ret
;
; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_v8i8_v8f16:
; RV64ZVE32F-ZVFHMIN: # %bb.0:
@@ -8537,13 +8498,13 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_12
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_14
; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_13
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_15
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_6: # %else8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_14
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_16
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_7: # %else11
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_9
@@ -8562,14 +8523,35 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_15
-; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_11
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_11: # %else17
; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB74_16
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_11: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB74_13
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_13: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_12: # %cond.load4
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_14: # %cond.load4
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
@@ -8580,7 +8562,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_6
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_13: # %cond.load7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_15: # %cond.load7
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
@@ -8593,7 +8575,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB74_7
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_14: # %cond.load10
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_16: # %cond.load10
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
@@ -8606,30 +8588,6 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB74_8
; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB74_9
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_15: # %cond.load16
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB74_11
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_16: # %cond.load19
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFHMIN-NEXT: ret
%ptrs = getelementptr inbounds half, ptr %base, <8 x i8> %idxs
%v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru)
ret <8 x half> %v
@@ -8689,13 +8647,13 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_12
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_14
; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_13
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_15
; RV64ZVE32F-ZVFH-NEXT: .LBB75_6: # %else8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_14
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_16
; RV64ZVE32F-ZVFH-NEXT: .LBB75_7: # %else11
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_9
@@ -8714,14 +8672,35 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_15
-; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_11
+; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFH-NEXT: .LBB75_11: # %else17
; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB75_16
-; RV64ZVE32F-ZVFH-NEXT: .LBB75_11: # %else20
+; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB75_13
+; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFH-NEXT: .LBB75_13: # %else20
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
-; RV64ZVE32F-ZVFH-NEXT: .LBB75_12: # %cond.load4
+; RV64ZVE32F-ZVFH-NEXT: .LBB75_14: # %cond.load4
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
@@ -8732,7 +8711,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_6
-; RV64ZVE32F-ZVFH-NEXT: .LBB75_13: # %cond.load7
+; RV64ZVE32F-ZVFH-NEXT: .LBB75_15: # %cond.load7
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
@@ -8745,7 +8724,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB75_7
-; RV64ZVE32F-ZVFH-NEXT: .LBB75_14: # %cond.load10
+; RV64ZVE32F-ZVFH-NEXT: .LBB75_16: # %cond.load10
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
@@ -8758,30 +8737,6 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB75_8
; RV64ZVE32F-ZVFH-NEXT: j .LBB75_9
-; RV64ZVE32F-ZVFH-NEXT: .LBB75_15: # %cond.load16
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB75_11
-; RV64ZVE32F-ZVFH-NEXT: .LBB75_16: # %cond.load19
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFH-NEXT: ret
;
; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_sext_v8i8_v8f16:
; RV64ZVE32F-ZVFHMIN: # %bb.0:
@@ -8816,13 +8771,13 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_12
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_14
; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_13
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_15
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_6: # %else8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_14
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_16
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_7: # %else11
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_9
@@ -8841,14 +8796,35 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_15
-; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_11
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_11: # %else17
; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB75_16
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_11: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB75_13
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_13: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_12: # %cond.load4
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_14: # %cond.load4
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
@@ -8859,7 +8835,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_6
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_13: # %cond.load7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_15: # %cond.load7
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
@@ -8872,7 +8848,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB75_7
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_14: # %cond.load10
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_16: # %cond.load10
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
@@ -8885,30 +8861,6 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB75_8
; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB75_9
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_15: # %cond.load16
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB75_11
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_16: # %cond.load19
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFHMIN-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i16>
%ptrs = getelementptr inbounds half, ptr %base, <8 x i16> %eidxs
%v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru)
@@ -8969,13 +8921,13 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_12
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_14
; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_13
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_15
; RV64ZVE32F-ZVFH-NEXT: .LBB76_6: # %else8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_14
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_16
; RV64ZVE32F-ZVFH-NEXT: .LBB76_7: # %else11
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_9
@@ -8995,14 +8947,37 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_15
-; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_11
+; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFH-NEXT: .LBB76_11: # %else17
; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB76_16
-; RV64ZVE32F-ZVFH-NEXT: .LBB76_11: # %else20
+; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB76_13
+; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, 255
+; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFH-NEXT: .LBB76_13: # %else20
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
-; RV64ZVE32F-ZVFH-NEXT: .LBB76_12: # %cond.load4
+; RV64ZVE32F-ZVFH-NEXT: .LBB76_14: # %cond.load4
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
@@ -9014,7 +8989,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_6
-; RV64ZVE32F-ZVFH-NEXT: .LBB76_13: # %cond.load7
+; RV64ZVE32F-ZVFH-NEXT: .LBB76_15: # %cond.load7
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
@@ -9028,7 +9003,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_7
-; RV64ZVE32F-ZVFH-NEXT: .LBB76_14: # %cond.load10
+; RV64ZVE32F-ZVFH-NEXT: .LBB76_16: # %cond.load10
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
@@ -9042,32 +9017,6 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB76_8
; RV64ZVE32F-ZVFH-NEXT: j .LBB76_9
-; RV64ZVE32F-ZVFH-NEXT: .LBB76_15: # %cond.load16
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
-; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB76_11
-; RV64ZVE32F-ZVFH-NEXT: .LBB76_16: # %cond.load19
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, 255
-; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFH-NEXT: ret
;
; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_zext_v8i8_v8f16:
; RV64ZVE32F-ZVFHMIN: # %bb.0:
@@ -9104,13 +9053,13 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_12
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_14
; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_13
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_15
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_6: # %else8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_14
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_16
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_7: # %else11
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_9
@@ -9130,14 +9079,37 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_15
-; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_11
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_11: # %else17
; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB76_16
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_11: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB76_13
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_13: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_12: # %cond.load4
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_14: # %cond.load4
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
@@ -9149,7 +9121,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_6
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_13: # %cond.load7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_15: # %cond.load7
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
@@ -9163,7 +9135,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_7
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_14: # %cond.load10
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_16: # %cond.load10
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
@@ -9177,32 +9149,6 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB76_8
; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB76_9
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_15: # %cond.load16
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB76_11
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_16: # %cond.load19
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, 255
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFHMIN-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i16>
%ptrs = getelementptr inbounds half, ptr %base, <8 x i16> %eidxs
%v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru)
@@ -9260,13 +9206,13 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_12
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_14
; RV64ZVE32F-ZVFH-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_13
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_15
; RV64ZVE32F-ZVFH-NEXT: .LBB77_6: # %else8
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_14
+; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_16
; RV64ZVE32F-ZVFH-NEXT: .LBB77_7: # %else11
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_9
@@ -9284,14 +9230,33 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_15
-; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_11
+; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFH-NEXT: .LBB77_11: # %else17
; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: bnez a1, .LBB77_16
-; RV64ZVE32F-ZVFH-NEXT: .LBB77_11: # %else20
+; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB77_13
+; RV64ZVE32F-ZVFH-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
+; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFH-NEXT: .LBB77_13: # %else20
+; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
-; RV64ZVE32F-ZVFH-NEXT: .LBB77_12: # %cond.load4
+; RV64ZVE32F-ZVFH-NEXT: .LBB77_14: # %cond.load4
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
@@ -9301,7 +9266,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_6
-; RV64ZVE32F-ZVFH-NEXT: .LBB77_13: # %cond.load7
+; RV64ZVE32F-ZVFH-NEXT: .LBB77_15: # %cond.load7
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
@@ -9313,7 +9278,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB77_7
-; RV64ZVE32F-ZVFH-NEXT: .LBB77_14: # %cond.load10
+; RV64ZVE32F-ZVFH-NEXT: .LBB77_16: # %cond.load10
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
@@ -9324,28 +9289,6 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFH-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFH-NEXT: bnez a2, .LBB77_8
; RV64ZVE32F-ZVFH-NEXT: j .LBB77_9
-; RV64ZVE32F-ZVFH-NEXT: .LBB77_15: # %cond.load16
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v10, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFH-NEXT: beqz a1, .LBB77_11
-; RV64ZVE32F-ZVFH-NEXT: .LBB77_16: # %cond.load19
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
-; RV64ZVE32F-ZVFH-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFH-NEXT: ret
;
; RV64ZVE32F-ZVFHMIN-LABEL: mgather_baseidx_v8f16:
; RV64ZVE32F-ZVFHMIN: # %bb.0:
@@ -9379,13 +9322,13 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 4
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_12
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_14
; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.5: # %else5
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_13
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_15
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_6: # %else8
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_14
+; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_16
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_7: # %else11
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_9
@@ -9403,14 +9346,33 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 64
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_15
-; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_11
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_11: # %else17
; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: bnez a1, .LBB77_16
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_11: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB77_13
+; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
+; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
+; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
+; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_13: # %else20
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_12: # %cond.load4
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_14: # %cond.load4
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
@@ -9420,7 +9382,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v11, 2
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 8
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_6
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_13: # %cond.load7
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_15: # %cond.load7
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
@@ -9432,7 +9394,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 3
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 16
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB77_7
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_14: # %cond.load10
+; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_16: # %cond.load10
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
@@ -9443,28 +9405,6 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a1, 32
; RV64ZVE32F-ZVFHMIN-NEXT: bnez a2, .LBB77_8
; RV64ZVE32F-ZVFHMIN-NEXT: j .LBB77_9
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_15: # %cond.load16
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v10, a2
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v10, 6
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, -128
-; RV64ZVE32F-ZVFHMIN-NEXT: beqz a1, .LBB77_11
-; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_16: # %cond.load19
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
-; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
-; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
-; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
-; RV64ZVE32F-ZVFHMIN-NEXT: ret
%ptrs = getelementptr inbounds half, ptr %base, <8 x i16> %idxs
%v = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru)
ret <8 x half> %v
@@ -9666,11 +9606,13 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
define <4 x float> @mgather_falsemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_v4f32:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f32:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -9847,13 +9789,13 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB84_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB84_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB84_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB84_15
; RV64ZVE32F-NEXT: .LBB84_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB84_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB84_16
; RV64ZVE32F-NEXT: .LBB84_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB84_9
@@ -9872,14 +9814,35 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB84_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB84_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: flw fa5, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB84_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB84_16
-; RV64ZVE32F-NEXT: .LBB84_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB84_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: flw fa5, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB84_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB84_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB84_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -9890,7 +9853,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB84_6
-; RV64ZVE32F-NEXT: .LBB84_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB84_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -9903,7 +9866,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB84_7
-; RV64ZVE32F-NEXT: .LBB84_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB84_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -9916,30 +9879,6 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB84_8
; RV64ZVE32F-NEXT: j .LBB84_9
-; RV64ZVE32F-NEXT: .LBB84_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: flw fa5, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB84_11
-; RV64ZVE32F-NEXT: .LBB84_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: flw fa5, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i8> %idxs
%v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
ret <8 x float> %v
@@ -9998,13 +9937,13 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB85_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB85_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB85_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB85_15
; RV64ZVE32F-NEXT: .LBB85_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB85_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB85_16
; RV64ZVE32F-NEXT: .LBB85_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB85_9
@@ -10023,14 +9962,35 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB85_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB85_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: flw fa5, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB85_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB85_16
-; RV64ZVE32F-NEXT: .LBB85_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB85_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: flw fa5, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB85_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB85_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB85_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -10041,7 +10001,7 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB85_6
-; RV64ZVE32F-NEXT: .LBB85_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB85_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -10054,7 +10014,7 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB85_7
-; RV64ZVE32F-NEXT: .LBB85_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB85_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -10067,30 +10027,6 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB85_8
; RV64ZVE32F-NEXT: j .LBB85_9
-; RV64ZVE32F-NEXT: .LBB85_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: flw fa5, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB85_11
-; RV64ZVE32F-NEXT: .LBB85_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: flw fa5, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs
%v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
@@ -10153,13 +10089,13 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB86_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB86_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB86_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB86_15
; RV64ZVE32F-NEXT: .LBB86_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB86_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB86_16
; RV64ZVE32F-NEXT: .LBB86_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB86_9
@@ -10179,14 +10115,37 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB86_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB86_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: flw fa5, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB86_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB86_16
-; RV64ZVE32F-NEXT: .LBB86_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB86_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: flw fa5, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB86_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB86_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB86_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -10198,7 +10157,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB86_6
-; RV64ZVE32F-NEXT: .LBB86_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB86_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -10212,7 +10171,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB86_7
-; RV64ZVE32F-NEXT: .LBB86_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB86_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: andi a2, a2, 255
@@ -10226,32 +10185,6 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB86_8
; RV64ZVE32F-NEXT: j .LBB86_9
-; RV64ZVE32F-NEXT: .LBB86_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: flw fa5, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB86_11
-; RV64ZVE32F-NEXT: .LBB86_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: flw fa5, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs
%v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
@@ -10312,13 +10245,13 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB87_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB87_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB87_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB87_15
; RV64ZVE32F-NEXT: .LBB87_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB87_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB87_16
; RV64ZVE32F-NEXT: .LBB87_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB87_9
@@ -10337,14 +10270,35 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB87_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB87_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: flw fa5, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB87_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB87_16
-; RV64ZVE32F-NEXT: .LBB87_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB87_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: flw fa5, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB87_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB87_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB87_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -10355,7 +10309,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB87_6
-; RV64ZVE32F-NEXT: .LBB87_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB87_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -10368,7 +10322,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB87_7
-; RV64ZVE32F-NEXT: .LBB87_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB87_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -10381,30 +10335,6 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB87_8
; RV64ZVE32F-NEXT: j .LBB87_9
-; RV64ZVE32F-NEXT: .LBB87_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: flw fa5, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB87_11
-; RV64ZVE32F-NEXT: .LBB87_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: flw fa5, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i16> %idxs
%v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
ret <8 x float> %v
@@ -10464,13 +10394,13 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB88_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB88_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB88_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB88_15
; RV64ZVE32F-NEXT: .LBB88_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB88_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB88_16
; RV64ZVE32F-NEXT: .LBB88_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB88_9
@@ -10489,14 +10419,35 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB88_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB88_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: flw fa5, 0(a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB88_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB88_16
-; RV64ZVE32F-NEXT: .LBB88_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB88_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: flw fa5, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB88_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB88_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB88_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -10507,7 +10458,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB88_6
-; RV64ZVE32F-NEXT: .LBB88_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB88_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -10520,7 +10471,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB88_7
-; RV64ZVE32F-NEXT: .LBB88_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB88_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -10533,30 +10484,6 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB88_8
; RV64ZVE32F-NEXT: j .LBB88_9
-; RV64ZVE32F-NEXT: .LBB88_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: flw fa5, 0(a2)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB88_11
-; RV64ZVE32F-NEXT: .LBB88_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: flw fa5, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs
%v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
@@ -10620,13 +10547,13 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: andi a3, a2, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a3, .LBB89_12
+; RV64ZVE32F-NEXT: bnez a3, .LBB89_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a3, a2, 8
-; RV64ZVE32F-NEXT: bnez a3, .LBB89_13
+; RV64ZVE32F-NEXT: bnez a3, .LBB89_15
; RV64ZVE32F-NEXT: .LBB89_6: # %else8
; RV64ZVE32F-NEXT: andi a3, a2, 16
-; RV64ZVE32F-NEXT: bnez a3, .LBB89_14
+; RV64ZVE32F-NEXT: bnez a3, .LBB89_16
; RV64ZVE32F-NEXT: .LBB89_7: # %else11
; RV64ZVE32F-NEXT: andi a3, a2, 32
; RV64ZVE32F-NEXT: beqz a3, .LBB89_9
@@ -10646,14 +10573,37 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: andi a3, a2, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
-; RV64ZVE32F-NEXT: bnez a3, .LBB89_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a3, .LBB89_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a3, v8
+; RV64ZVE32F-NEXT: and a3, a3, a1
+; RV64ZVE32F-NEXT: slli a3, a3, 2
+; RV64ZVE32F-NEXT: add a3, a0, a3
+; RV64ZVE32F-NEXT: flw fa5, 0(a3)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB89_11: # %else17
; RV64ZVE32F-NEXT: andi a2, a2, -128
-; RV64ZVE32F-NEXT: bnez a2, .LBB89_16
-; RV64ZVE32F-NEXT: .LBB89_11: # %else20
+; RV64ZVE32F-NEXT: beqz a2, .LBB89_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: and a1, a2, a1
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: flw fa5, 0(a0)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB89_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB89_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB89_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
@@ -10665,7 +10615,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2
; RV64ZVE32F-NEXT: andi a3, a2, 8
; RV64ZVE32F-NEXT: beqz a3, .LBB89_6
-; RV64ZVE32F-NEXT: .LBB89_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB89_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
@@ -10679,7 +10629,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a3, a2, 16
; RV64ZVE32F-NEXT: beqz a3, .LBB89_7
-; RV64ZVE32F-NEXT: .LBB89_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB89_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a3, v9
; RV64ZVE32F-NEXT: and a3, a3, a1
@@ -10693,32 +10643,6 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: andi a3, a2, 32
; RV64ZVE32F-NEXT: bnez a3, .LBB89_8
; RV64ZVE32F-NEXT: j .LBB89_9
-; RV64ZVE32F-NEXT: .LBB89_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: and a3, a3, a1
-; RV64ZVE32F-NEXT: slli a3, a3, 2
-; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: flw fa5, 0(a3)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a2, a2, -128
-; RV64ZVE32F-NEXT: beqz a2, .LBB89_11
-; RV64ZVE32F-NEXT: .LBB89_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: and a1, a2, a1
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: flw fa5, 0(a0)
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %eidxs
%v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
@@ -10775,13 +10699,13 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: andi a2, a1, 4
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB90_12
+; RV64ZVE32F-NEXT: bnez a2, .LBB90_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
; RV64ZVE32F-NEXT: andi a2, a1, 8
-; RV64ZVE32F-NEXT: bnez a2, .LBB90_13
+; RV64ZVE32F-NEXT: bnez a2, .LBB90_15
; RV64ZVE32F-NEXT: .LBB90_6: # %else8
; RV64ZVE32F-NEXT: andi a2, a1, 16
-; RV64ZVE32F-NEXT: bnez a2, .LBB90_14
+; RV64ZVE32F-NEXT: bnez a2, .LBB90_16
; RV64ZVE32F-NEXT: .LBB90_7: # %else11
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: beqz a2, .LBB90_9
@@ -10799,14 +10723,33 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: andi a2, a1, 64
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB90_15
-; RV64ZVE32F-NEXT: # %bb.10: # %else17
+; RV64ZVE32F-NEXT: beqz a2, .LBB90_11
+; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: slli a2, a2, 2
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: flw fa5, 0(a2)
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
+; RV64ZVE32F-NEXT: .LBB90_11: # %else17
; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: bnez a1, .LBB90_16
-; RV64ZVE32F-NEXT: .LBB90_11: # %else20
+; RV64ZVE32F-NEXT: beqz a1, .LBB90_13
+; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v8
+; RV64ZVE32F-NEXT: slli a1, a1, 2
+; RV64ZVE32F-NEXT: add a0, a0, a1
+; RV64ZVE32F-NEXT: flw fa5, 0(a0)
+; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
+; RV64ZVE32F-NEXT: .LBB90_13: # %else20
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB90_12: # %cond.load4
+; RV64ZVE32F-NEXT: .LBB90_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -10816,7 +10759,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 2
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB90_6
-; RV64ZVE32F-NEXT: .LBB90_13: # %cond.load7
+; RV64ZVE32F-NEXT: .LBB90_15: # %cond.load7
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
@@ -10827,7 +10770,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB90_7
-; RV64ZVE32F-NEXT: .LBB90_14: # %cond.load10
+; RV64ZVE32F-NEXT: .LBB90_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
@@ -10838,28 +10781,6 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB90_8
; RV64ZVE32F-NEXT: j .LBB90_9
-; RV64ZVE32F-NEXT: .LBB90_15: # %cond.load16
-; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: slli a2, a2, 2
-; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: flw fa5, 0(a2)
-; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6
-; RV64ZVE32F-NEXT: andi a1, a1, -128
-; RV64ZVE32F-NEXT: beqz a1, .LBB90_11
-; RV64ZVE32F-NEXT: .LBB90_16: # %cond.load19
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: slli a1, a1, 2
-; RV64ZVE32F-NEXT: add a0, a0, a1
-; RV64ZVE32F-NEXT: flw fa5, 0(a0)
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
-; RV64ZVE32F-NEXT: vmv2r.v v8, v10
-; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %idxs
%v = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru)
ret <8 x float> %v
@@ -11135,11 +11056,13 @@ define <4 x double> @mgather_truemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passt
define <4 x double> @mgather_falsemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passthru) {
; RV32V-LABEL: mgather_falsemask_v4f64:
; RV32V: # %bb.0:
+; RV32V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32V-NEXT: vmv2r.v v8, v10
; RV32V-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f64:
; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64V-NEXT: vmv2r.v v8, v10
; RV64V-NEXT: ret
;
@@ -13700,6 +13623,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15
; RV64ZVE32F-NEXT: .LBB107_24: # %else44
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB107_25: # %cond.load4
@@ -14086,6 +14010,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 31
; RV64ZVE32F-NEXT: .LBB108_48: # %else92
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB108_49: # %cond.load4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
index e0cf39c75da240..dd746f8cd92b7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -318,6 +318,7 @@ define <128 x i16> @masked_load_v128i16(ptr %a, <128 x i1> %mask) {
define <256 x i8> @masked_load_v256i8(ptr %a, <256 x i1> %mask) {
; CHECK-LABEL: masked_load_v256i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 46c2033d28b387..268f7c4e041dbe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -135,6 +135,7 @@ declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -263,6 +264,7 @@ declare <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -307,6 +309,7 @@ declare <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -393,6 +396,7 @@ declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -437,6 +441,7 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -481,6 +486,7 @@ declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -525,6 +531,7 @@ declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -569,6 +576,7 @@ declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v32f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -584,6 +592,7 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
index ad358d73202402..b278c89042c683 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -23,6 +23,7 @@ declare i1 @llvm.vp.reduce.or.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -39,6 +40,7 @@ declare i1 @llvm.vp.reduce.xor.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -71,6 +73,7 @@ declare i1 @llvm.vp.reduce.or.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -87,6 +90,7 @@ declare i1 @llvm.vp.reduce.xor.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -119,6 +123,7 @@ declare i1 @llvm.vp.reduce.or.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -135,6 +140,7 @@ declare i1 @llvm.vp.reduce.xor.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -167,6 +173,7 @@ declare i1 @llvm.vp.reduce.or.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -183,6 +190,7 @@ declare i1 @llvm.vp.reduce.xor.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -231,6 +239,7 @@ declare i1 @llvm.vp.reduce.and.v256i1(i1, <256 x i1>, <256 x i1>, i32)
define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v256i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v9
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: li a3, 128
@@ -239,6 +248,7 @@ define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB14_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmnot.m v9, v9
@@ -265,6 +275,7 @@ declare i1 @llvm.vp.reduce.or.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -281,6 +292,7 @@ declare i1 @llvm.vp.reduce.xor.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -297,6 +309,7 @@ declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -313,6 +326,7 @@ declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -329,6 +343,7 @@ declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -345,6 +360,7 @@ declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -361,6 +377,7 @@ declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -489,6 +506,7 @@ declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -505,6 +523,7 @@ declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -521,6 +540,7 @@ declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -537,6 +557,7 @@ declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -553,6 +574,7 @@ declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -569,6 +591,7 @@ declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -585,6 +608,7 @@ declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -601,6 +625,7 @@ declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -617,6 +642,7 @@ declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -633,6 +659,7 @@ declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -649,6 +676,7 @@ declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -665,6 +693,7 @@ declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -681,6 +710,7 @@ declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -697,6 +727,7 @@ declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index b8617fda3aa7ec..5104e70c8a1252 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -123,6 +123,7 @@ declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -239,6 +240,7 @@ declare <8 x float> @llvm.vp.rint.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -279,6 +281,7 @@ declare <16 x float> @llvm.vp.rint.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -357,6 +360,7 @@ declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -397,6 +401,7 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -437,6 +442,7 @@ declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -477,6 +483,7 @@ declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -517,6 +524,7 @@ declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v32f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -532,6 +540,7 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 820a05e3d6042b..2f50b768c4a656 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -194,6 +194,7 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_round_v8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -261,6 +262,7 @@ declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -280,6 +282,7 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_round_v16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -431,6 +434,7 @@ declare <8 x float> @llvm.vp.round.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -475,6 +479,7 @@ declare <16 x float> @llvm.vp.round.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -561,6 +566,7 @@ declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -605,6 +611,7 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -649,6 +656,7 @@ declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -693,6 +701,7 @@ declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -743,6 +752,7 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -757,6 +767,7 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 8391c7939180a0..0032dd956ed9e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -194,6 +194,7 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_roundeven_v8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -261,6 +262,7 @@ declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -280,6 +282,7 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
;
; ZVFHMIN-LABEL: vp_roundeven_v16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -431,6 +434,7 @@ declare <8 x float> @llvm.vp.roundeven.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -475,6 +479,7 @@ declare <16 x float> @llvm.vp.roundeven.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -561,6 +566,7 @@ declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -605,6 +611,7 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -649,6 +656,7 @@ declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -693,6 +701,7 @@ declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -743,6 +752,7 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -757,6 +767,7 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 8c38d244602655..59bb431ad56487 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -194,6 +194,7 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
;
; ZVFHMIN-LABEL: vp_roundtozero_v8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -261,6 +262,7 @@ declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -280,6 +282,7 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
;
; ZVFHMIN-LABEL: vp_roundtozero_v16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -431,6 +434,7 @@ declare <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -475,6 +479,7 @@ declare <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -561,6 +566,7 @@ declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -605,6 +611,7 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -649,6 +656,7 @@ declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v15f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -693,6 +701,7 @@ declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -743,6 +752,7 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -757,6 +767,7 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index d52c42891fcc3b..7b004c67586c45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -598,6 +598,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -622,6 +623,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -648,6 +650,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_v256i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -662,6 +665,7 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB52_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -677,6 +681,7 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z
define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_v256i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -691,6 +696,7 @@ define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB53_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 38026bb591f797..16e54255b460da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -8,6 +8,7 @@
define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
; VLA-LABEL: concat_2xv4i32:
; VLA: # %bb.0:
+; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLA-NEXT: vmv1r.v v10, v9
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT: vslideup.vi v8, v10, 4
@@ -32,6 +33,7 @@ define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
;
; VLS-LABEL: concat_4xv2i32:
; VLS: # %bb.0:
+; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v13, v10
; VLS-NEXT: vmv1r.v v12, v8
; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -62,6 +64,7 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x
;
; VLS-LABEL: concat_8xv1i32:
; VLS: # %bb.0:
+; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v17, v12
; VLS-NEXT: vmv1r.v v16, v8
; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
@@ -89,6 +92,7 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x
define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
; VLA-LABEL: concat_2xv8i32:
; VLA: # %bb.0:
+; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLA-NEXT: vmv2r.v v12, v10
; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; VLA-NEXT: vslideup.vi v8, v12, 8
@@ -104,6 +108,7 @@ define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
define <16 x i32> @concat_4xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; VLA-LABEL: concat_4xv4i32:
; VLA: # %bb.0:
+; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLA-NEXT: vmv1r.v v14, v11
; VLA-NEXT: vmv1r.v v12, v10
; VLA-NEXT: vmv1r.v v10, v9
@@ -140,6 +145,7 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
;
; VLS-LABEL: concat_8xv2i32:
; VLS: # %bb.0:
+; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v19, v14
; VLS-NEXT: vmv1r.v v18, v12
; VLS-NEXT: vmv1r.v v17, v10
@@ -164,6 +170,7 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
; VLA-LABEL: concat_2xv16i32:
; VLA: # %bb.0:
+; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLA-NEXT: vmv4r.v v16, v12
; VLA-NEXT: li a0, 32
; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -180,6 +187,7 @@ define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; VLA-LABEL: concat_4xv8i32:
; VLA: # %bb.0:
+; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLA-NEXT: vmv2r.v v20, v14
; VLA-NEXT: vmv2r.v v16, v12
; VLA-NEXT: vmv2r.v v12, v10
@@ -203,6 +211,7 @@ define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x
define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
; VLA-LABEL: concat_8xv4i32:
; VLA: # %bb.0:
+; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; VLA-NEXT: vmv1r.v v18, v15
; VLA-NEXT: vmv1r.v v20, v14
; VLA-NEXT: vmv1r.v v14, v13
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index d461fa8378cffc..6f4c2d6cb64122 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -108,6 +108,7 @@ define <4 x i64> @m2_splat_into_identity(<4 x i64> %v1) vscale_range(2,2) {
define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m2_broadcast_i128:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: ret
%res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -117,6 +118,7 @@ define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) {
define <8 x i64> @m4_broadcast_i128(<8 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m4_broadcast_i128:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index 407535831aedad..3cc6d814d9a49c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -966,6 +966,7 @@ define <16 x i8> @reverse_v16i8_2(<8 x i8> %a, <8 x i8> %b) {
define <32 x i8> @reverse_v32i8_2(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: reverse_v32i8_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
@@ -1035,6 +1036,7 @@ define <8 x i16> @reverse_v8i16_2(<4 x i16> %a, <4 x i16> %b) {
define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: reverse_v16i16_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1060,6 +1062,7 @@ define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
define <32 x i16> @reverse_v32i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: reverse_v32i16_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1116,6 +1119,7 @@ define <4 x i32> @reverse_v4i32_2(<2 x i32> %a, < 2 x i32> %b) {
define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: reverse_v8i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1142,6 +1146,7 @@ define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: reverse_v16i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1170,6 +1175,7 @@ define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) {
define <32 x i32> @reverse_v32i32_2(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: reverse_v32i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1219,6 +1225,7 @@ define <4 x i64> @reverse_v4i64_2(<2 x i64> %a, < 2 x i64> %b) {
define <8 x i64> @reverse_v8i64_2(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: reverse_v8i64_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
@@ -1289,6 +1296,7 @@ define <8 x half> @reverse_v8f16_2(<4 x half> %a, <4 x half> %b) {
define <16 x half> @reverse_v16f16_2(<8 x half> %a, <8 x half> %b) {
; CHECK-LABEL: reverse_v16f16_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1361,6 +1369,7 @@ define <4 x float> @reverse_v4f32_2(<2 x float> %a, <2 x float> %b) {
define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: reverse_v8f32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1387,6 +1396,7 @@ define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
define <16 x float> @reverse_v16f32_2(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: reverse_v16f32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1430,6 +1440,7 @@ define <4 x double> @reverse_v4f64_2(<2 x double> %a, < 2 x double> %b) {
define <8 x double> @reverse_v8f64_2(<4 x double> %a, <4 x double> %b) {
; CHECK-LABEL: reverse_v8f64_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
index c37c3a9ee0ea0c..8fecbaacaead44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
@@ -415,6 +415,7 @@ define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert3(<4 x i8> %v, i8 %b) {
define <2 x i8> @vslide1up_4xi8_neg_length_changing(<4 x i8> %v, i8 %b) {
; CHECK-LABEL: vslide1up_4xi8_neg_length_changing:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index 1a08c613ca36a3..df08d19b187187 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -62,6 +62,7 @@ define void @gather_masked(ptr noalias nocapture %A, ptr noalias nocapture reado
; CHECK-NEXT: li a4, 5
; CHECK-NEXT: .LBB1_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vlse8.v v9, (a1), a4, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 1c2c90478a1f77..db13b174b30983 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -542,6 +542,7 @@ declare <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr, i32, <
define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind {
; CHECK-LABEL: strided_vpload_v32f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: mv a3, a2
@@ -598,6 +599,7 @@ declare <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr, i32,
define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_v33f64:
; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: li a5, 32
; CHECK-RV32-NEXT: mv a3, a4
@@ -648,6 +650,7 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
;
; CHECK-RV64-LABEL: strided_load_v33f64:
; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: li a5, 32
; CHECK-RV64-NEXT: mv a4, a3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 12893ec55cda76..e07be9320990a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -53,6 +53,7 @@ declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32)
define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
@@ -231,6 +232,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: mul a2, a2, a3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 72 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a3, 24
@@ -282,6 +284,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB16_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v27
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a5)
@@ -306,6 +309,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a6, 64
; CHECK-NEXT: .LBB16_4:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v27
; CHECK-NEXT: addi a5, a1, 384
; CHECK-NEXT: li a3, 32
@@ -342,6 +346,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a6, 16
; CHECK-NEXT: .LBB16_6:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a5)
@@ -390,6 +395,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB16_10:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v25, v7, 2
@@ -406,6 +412,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.11:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: .LBB16_12:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: slli a4, a4, 4
@@ -490,6 +497,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.13:
; CHECK-NEXT: li a7, 16
; CHECK-NEXT: .LBB16_14:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
@@ -541,6 +549,7 @@ declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index db03dc3d5ab1e2..023d954a4f36ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -80,14 +80,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV32-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV32-SLOW-NEXT: vmv.x.s a0, v0
; RV32-SLOW-NEXT: andi a1, a0, 1
-; RV32-SLOW-NEXT: bnez a1, .LBB4_3
-; RV32-SLOW-NEXT: # %bb.1: # %else
-; RV32-SLOW-NEXT: andi a0, a0, 2
-; RV32-SLOW-NEXT: bnez a0, .LBB4_4
-; RV32-SLOW-NEXT: .LBB4_2: # %else2
-; RV32-SLOW-NEXT: vmv1r.v v8, v9
-; RV32-SLOW-NEXT: ret
-; RV32-SLOW-NEXT: .LBB4_3: # %cond.load
+; RV32-SLOW-NEXT: beqz a1, .LBB4_2
+; RV32-SLOW-NEXT: # %bb.1: # %cond.load
; RV32-SLOW-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-SLOW-NEXT: vmv.x.s a1, v8
; RV32-SLOW-NEXT: lbu a2, 1(a1)
@@ -96,9 +90,10 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV32-SLOW-NEXT: or a1, a2, a1
; RV32-SLOW-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; RV32-SLOW-NEXT: vmv.s.x v9, a1
+; RV32-SLOW-NEXT: .LBB4_2: # %else
; RV32-SLOW-NEXT: andi a0, a0, 2
-; RV32-SLOW-NEXT: beqz a0, .LBB4_2
-; RV32-SLOW-NEXT: .LBB4_4: # %cond.load1
+; RV32-SLOW-NEXT: beqz a0, .LBB4_4
+; RV32-SLOW-NEXT: # %bb.3: # %cond.load1
; RV32-SLOW-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-SLOW-NEXT: vslidedown.vi v8, v8, 1
; RV32-SLOW-NEXT: vmv.x.s a0, v8
@@ -109,6 +104,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV32-SLOW-NEXT: vmv.s.x v8, a0
; RV32-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1
+; RV32-SLOW-NEXT: .LBB4_4: # %else2
+; RV32-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-SLOW-NEXT: vmv1r.v v8, v9
; RV32-SLOW-NEXT: ret
;
@@ -117,14 +114,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV64-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-SLOW-NEXT: vmv.x.s a0, v0
; RV64-SLOW-NEXT: andi a1, a0, 1
-; RV64-SLOW-NEXT: bnez a1, .LBB4_3
-; RV64-SLOW-NEXT: # %bb.1: # %else
-; RV64-SLOW-NEXT: andi a0, a0, 2
-; RV64-SLOW-NEXT: bnez a0, .LBB4_4
-; RV64-SLOW-NEXT: .LBB4_2: # %else2
-; RV64-SLOW-NEXT: vmv1r.v v8, v9
-; RV64-SLOW-NEXT: ret
-; RV64-SLOW-NEXT: .LBB4_3: # %cond.load
+; RV64-SLOW-NEXT: beqz a1, .LBB4_2
+; RV64-SLOW-NEXT: # %bb.1: # %cond.load
; RV64-SLOW-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-SLOW-NEXT: vmv.x.s a1, v8
; RV64-SLOW-NEXT: lbu a2, 1(a1)
@@ -133,9 +124,10 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV64-SLOW-NEXT: or a1, a2, a1
; RV64-SLOW-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; RV64-SLOW-NEXT: vmv.s.x v9, a1
+; RV64-SLOW-NEXT: .LBB4_2: # %else
; RV64-SLOW-NEXT: andi a0, a0, 2
-; RV64-SLOW-NEXT: beqz a0, .LBB4_2
-; RV64-SLOW-NEXT: .LBB4_4: # %cond.load1
+; RV64-SLOW-NEXT: beqz a0, .LBB4_4
+; RV64-SLOW-NEXT: # %bb.3: # %cond.load1
; RV64-SLOW-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-SLOW-NEXT: vslidedown.vi v8, v8, 1
; RV64-SLOW-NEXT: vmv.x.s a0, v8
@@ -146,6 +138,8 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV64-SLOW-NEXT: vmv.s.x v8, a0
; RV64-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
+; RV64-SLOW-NEXT: .LBB4_4: # %else2
+; RV64-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
;
@@ -174,23 +168,18 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV32-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV32-SLOW-NEXT: vmv.x.s a0, v0
; RV32-SLOW-NEXT: andi a1, a0, 1
-; RV32-SLOW-NEXT: bnez a1, .LBB5_3
-; RV32-SLOW-NEXT: # %bb.1: # %else
-; RV32-SLOW-NEXT: andi a0, a0, 2
-; RV32-SLOW-NEXT: bnez a0, .LBB5_4
-; RV32-SLOW-NEXT: .LBB5_2: # %else2
-; RV32-SLOW-NEXT: vmv1r.v v8, v9
-; RV32-SLOW-NEXT: ret
-; RV32-SLOW-NEXT: .LBB5_3: # %cond.load
+; RV32-SLOW-NEXT: beqz a1, .LBB5_2
+; RV32-SLOW-NEXT: # %bb.1: # %cond.load
; RV32-SLOW-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV32-SLOW-NEXT: vmv.x.s a1, v8
; RV32-SLOW-NEXT: lw a2, 0(a1)
; RV32-SLOW-NEXT: lw a1, 4(a1)
; RV32-SLOW-NEXT: vslide1down.vx v9, v9, a2
; RV32-SLOW-NEXT: vslide1down.vx v9, v9, a1
+; RV32-SLOW-NEXT: .LBB5_2: # %else
; RV32-SLOW-NEXT: andi a0, a0, 2
-; RV32-SLOW-NEXT: beqz a0, .LBB5_2
-; RV32-SLOW-NEXT: .LBB5_4: # %cond.load1
+; RV32-SLOW-NEXT: beqz a0, .LBB5_4
+; RV32-SLOW-NEXT: # %bb.3: # %cond.load1
; RV32-SLOW-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-SLOW-NEXT: vslidedown.vi v8, v8, 1
; RV32-SLOW-NEXT: vmv.x.s a0, v8
@@ -201,6 +190,8 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV32-SLOW-NEXT: vslide1down.vx v8, v8, a0
; RV32-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1
+; RV32-SLOW-NEXT: .LBB5_4: # %else2
+; RV32-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-SLOW-NEXT: vmv1r.v v8, v9
; RV32-SLOW-NEXT: ret
;
@@ -209,14 +200,8 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-SLOW-NEXT: vmv.x.s a0, v0
; RV64-SLOW-NEXT: andi a1, a0, 1
-; RV64-SLOW-NEXT: bnez a1, .LBB5_3
-; RV64-SLOW-NEXT: # %bb.1: # %else
-; RV64-SLOW-NEXT: andi a0, a0, 2
-; RV64-SLOW-NEXT: bnez a0, .LBB5_4
-; RV64-SLOW-NEXT: .LBB5_2: # %else2
-; RV64-SLOW-NEXT: vmv1r.v v8, v9
-; RV64-SLOW-NEXT: ret
-; RV64-SLOW-NEXT: .LBB5_3: # %cond.load
+; RV64-SLOW-NEXT: beqz a1, .LBB5_2
+; RV64-SLOW-NEXT: # %bb.1: # %cond.load
; RV64-SLOW-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV64-SLOW-NEXT: vmv.x.s a1, v8
; RV64-SLOW-NEXT: lwu a2, 4(a1)
@@ -224,9 +209,10 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: slli a2, a2, 32
; RV64-SLOW-NEXT: or a1, a2, a1
; RV64-SLOW-NEXT: vmv.s.x v9, a1
+; RV64-SLOW-NEXT: .LBB5_2: # %else
; RV64-SLOW-NEXT: andi a0, a0, 2
-; RV64-SLOW-NEXT: beqz a0, .LBB5_2
-; RV64-SLOW-NEXT: .LBB5_4: # %cond.load1
+; RV64-SLOW-NEXT: beqz a0, .LBB5_4
+; RV64-SLOW-NEXT: # %bb.3: # %cond.load1
; RV64-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-SLOW-NEXT: vslidedown.vi v8, v8, 1
; RV64-SLOW-NEXT: vmv.x.s a0, v8
@@ -236,6 +222,8 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: or a0, a1, a0
; RV64-SLOW-NEXT: vmv.s.x v8, a0
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
+; RV64-SLOW-NEXT: .LBB5_4: # %else2
+; RV64-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 5be1a771eb2799..a5cf019db6a279 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -363,6 +363,7 @@ declare <256 x i8> @llvm.vp.add.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
@@ -377,6 +378,7 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index ac48542ca9ebb3..9d68f737946a43 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -267,6 +267,7 @@ declare <256 x i8> @llvm.vp.smax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -281,6 +282,7 @@ define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index 794eef6ed40b21..1ba36b879d38d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -266,6 +266,7 @@ declare <256 x i8> @llvm.vp.umax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -280,6 +281,7 @@ define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index 34011f6bd8acd3..0bb646d360556d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -267,6 +267,7 @@ declare <256 x i8> @llvm.vp.smin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -281,6 +282,7 @@ define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index 79e72b7d9cac9d..a1cee432cf37b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -266,6 +266,7 @@ declare <256 x i8> @llvm.vp.umin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -280,6 +281,7 @@ define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 24e75cde2ce915..312c3061f4a63b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2617,6 +2617,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_v32f64:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v16, 0
@@ -2640,6 +2641,7 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB104_2:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index 71f497e4c7be48..17b7aeda87118b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -394,6 +394,7 @@ declare <33 x double> @llvm.vp.load.v33f64.p0(ptr, <33 x i1>, i32)
define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v33f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: mv a3, a2
@@ -428,6 +429,7 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB32_6:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a1), v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index a11c2b6bca12ec..ac93a306dd877b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1181,6 +1181,7 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 888fc79f0122da..2b80c37496a3ec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -372,6 +372,7 @@ declare <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
@@ -386,6 +387,7 @@ define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index e1d57ae1e67414..a95226b55ab70e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -368,6 +368,7 @@ declare <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
@@ -382,6 +383,7 @@ define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index 1d8af4c46cc078..ececb43de22d7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -163,6 +163,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v8
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: li a2, 128
@@ -182,6 +183,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB11_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 8fad3db55f9bcd..1cb2ce508b6401 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -384,6 +384,7 @@ declare <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: addi a3, a1, -128
@@ -399,6 +400,7 @@ define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index ca35aa6c4a94c1..03691d58ca3078 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -379,6 +379,7 @@ declare <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_v258i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: addi a3, a1, -128
@@ -394,6 +395,7 @@ define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index e6dfe5e78cdb4b..3a6dc6261a70f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -117,6 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.floor.nxv4bf16(<vscale x 4 x bfloat>, <vs
define <vscale x 4 x bfloat> @vp_floor_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -169,6 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.floor.nxv8bf16(<vscale x 8 x bfloat>, <vs
define <vscale x 8 x bfloat> @vp_floor_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -221,6 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.floor.nxv16bf16(<vscale x 16 x bfloat>,
define <vscale x 16 x bfloat> @vp_floor_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -279,6 +282,7 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -582,6 +586,7 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_floor_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -649,6 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -668,6 +674,7 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_floor_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -735,6 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -754,6 +762,7 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_floor_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -821,6 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv32f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -846,6 +856,7 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1068,6 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.floor.nxv4f32(<vscale x 4 x float>, <vscal
define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1112,6 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.floor.nxv8f32(<vscale x 8 x float>, <vscal
define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1156,6 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.floor.nxv16f32(<vscale x 16 x float>, <vs
define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1242,6 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1286,6 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1330,6 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1374,6 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1425,6 +1443,7 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
@@ -1458,6 +1477,7 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index 734dd5e33c4fcb..616146e3675956 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -153,6 +153,7 @@ define <vscale x 32 x bfloat> @vfmax_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFH-NEXT: slli a0, a0, 1
; ZVFH-NEXT: add a0, a0, a1
; ZVFH-NEXT: sub sp, sp, a0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v24, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -228,6 +229,7 @@ define <vscale x 32 x bfloat> @vfmax_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -498,6 +500,7 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index e90d3e3f3e8295..5cba24e02a3ffb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -19,6 +19,7 @@ declare <vscale x 1 x bfloat> @llvm.vp.maximum.nxv1bf16(<vscale x 1 x bfloat>, <
define <vscale x 1 x bfloat> @vfmax_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -65,6 +66,7 @@ declare <vscale x 2 x bfloat> @llvm.vp.maximum.nxv2bf16(<vscale x 2 x bfloat>, <
define <vscale x 2 x bfloat> @vfmax_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -111,6 +113,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.maximum.nxv4bf16(<vscale x 4 x bfloat>, <
define <vscale x 4 x bfloat> @vfmax_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -159,6 +162,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.maximum.nxv8bf16(<vscale x 8 x bfloat>, <
define <vscale x 8 x bfloat> @vfmax_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -213,6 +217,7 @@ define <vscale x 16 x bfloat> @vfmax_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
@@ -565,6 +570,7 @@ declare <vscale x 1 x half> @llvm.vp.maximum.nxv1f16(<vscale x 1 x half>, <vscal
define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv1f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -578,6 +584,7 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -635,6 +642,7 @@ declare <vscale x 2 x half> @llvm.vp.maximum.nxv2f16(<vscale x 2 x half>, <vscal
define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv2f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -648,6 +656,7 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -705,6 +714,7 @@ declare <vscale x 4 x half> @llvm.vp.maximum.nxv4f16(<vscale x 4 x half>, <vscal
define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv4f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -718,6 +728,7 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -777,6 +788,7 @@ declare <vscale x 8 x half> @llvm.vp.maximum.nxv8f16(<vscale x 8 x half>, <vscal
define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -792,6 +804,7 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -851,6 +864,7 @@ declare <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half>, <vs
define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -872,6 +886,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
@@ -961,6 +976,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1277,6 +1293,7 @@ declare <vscale x 1 x float> @llvm.vp.maximum.nxv1f32(<vscale x 1 x float>, <vsc
define <vscale x 1 x float> @vfmax_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1311,6 +1328,7 @@ declare <vscale x 2 x float> @llvm.vp.maximum.nxv2f32(<vscale x 2 x float>, <vsc
define <vscale x 2 x float> @vfmax_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1345,6 +1363,7 @@ declare <vscale x 4 x float> @llvm.vp.maximum.nxv4f32(<vscale x 4 x float>, <vsc
define <vscale x 4 x float> @vfmax_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1381,6 +1400,7 @@ declare <vscale x 8 x float> @llvm.vp.maximum.nxv8f32(<vscale x 8 x float>, <vsc
define <vscale x 8 x float> @vfmax_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1417,6 +1437,7 @@ declare <vscale x 1 x double> @llvm.vp.maximum.nxv1f64(<vscale x 1 x double>, <v
define <vscale x 1 x double> @vfmax_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1451,6 +1472,7 @@ declare <vscale x 2 x double> @llvm.vp.maximum.nxv2f64(<vscale x 2 x double>, <v
define <vscale x 2 x double> @vfmax_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1487,6 +1509,7 @@ declare <vscale x 4 x double> @llvm.vp.maximum.nxv4f64(<vscale x 4 x double>, <v
define <vscale x 4 x double> @vfmax_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1529,6 +1552,7 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1581,6 +1605,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -1633,6 +1658,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB40_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index 21251ee2f3c630..ddbb513c6d9aad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -153,6 +153,7 @@ define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFH-NEXT: slli a0, a0, 1
; ZVFH-NEXT: add a0, a0, a1
; ZVFH-NEXT: sub sp, sp, a0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v24, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -228,6 +229,7 @@ define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -498,6 +500,7 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 818a90607ea073..a26f14ef42dc10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -19,6 +19,7 @@ declare <vscale x 1 x bfloat> @llvm.vp.minimum.nxv1bf16(<vscale x 1 x bfloat>, <
define <vscale x 1 x bfloat> @vfmin_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -65,6 +66,7 @@ declare <vscale x 2 x bfloat> @llvm.vp.minimum.nxv2bf16(<vscale x 2 x bfloat>, <
define <vscale x 2 x bfloat> @vfmin_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -111,6 +113,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.minimum.nxv4bf16(<vscale x 4 x bfloat>, <
define <vscale x 4 x bfloat> @vfmin_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -159,6 +162,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.minimum.nxv8bf16(<vscale x 8 x bfloat>, <
define <vscale x 8 x bfloat> @vfmin_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -213,6 +217,7 @@ define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
@@ -565,6 +570,7 @@ declare <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half>, <vscal
define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv1f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -578,6 +584,7 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -635,6 +642,7 @@ declare <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half>, <vscal
define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv2f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -648,6 +656,7 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -705,6 +714,7 @@ declare <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half>, <vscal
define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv4f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -718,6 +728,7 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -777,6 +788,7 @@ declare <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half>, <vscal
define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -792,6 +804,7 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -851,6 +864,7 @@ declare <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half>, <vs
define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -872,6 +886,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
@@ -961,6 +976,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1277,6 +1293,7 @@ declare <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float>, <vsc
define <vscale x 1 x float> @vfmin_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1311,6 +1328,7 @@ declare <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float>, <vsc
define <vscale x 2 x float> @vfmin_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1345,6 +1363,7 @@ declare <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float>, <vsc
define <vscale x 4 x float> @vfmin_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1381,6 +1400,7 @@ declare <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float>, <vsc
define <vscale x 8 x float> @vfmin_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1417,6 +1437,7 @@ declare <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double>, <v
define <vscale x 1 x double> @vfmin_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1451,6 +1472,7 @@ declare <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double>, <v
define <vscale x 2 x double> @vfmin_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1487,6 +1509,7 @@ declare <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double>, <v
define <vscale x 4 x double> @vfmin_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1529,6 +1552,7 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1581,6 +1605,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -1633,6 +1658,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB40_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
index 025874a1a74e2e..948c8e0ecb9b95 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
@@ -18,6 +18,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-NEXT: .LBB0_1: # %for.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: th.lrb a0, a1, a0, 0
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v8
; RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV32-NEXT: vmv.s.x v9, a0
@@ -45,6 +46,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: th.lrb a0, a1, a0, 0
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v8
; RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV64-NEXT: vmv.s.x v9, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index c7e3c8cb519829..1b40dd2f9c8b32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -703,6 +703,7 @@ define <vscale x 16 x i32> @fshl_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: li a0, 31
@@ -882,6 +883,7 @@ define <vscale x 7 x i64> @fshl_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64>
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: li a0, 63
@@ -953,6 +955,7 @@ define <vscale x 8 x i64> @fshl_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64>
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: li a0, 63
@@ -988,6 +991,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
@@ -1103,6 +1107,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB46_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 40
@@ -1173,6 +1178,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
@@ -1261,6 +1267,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB47_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
index 967a58b45a599b..328c1ab3bddff9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
@@ -365,11 +365,13 @@ entry:
define <vscale x 4 x i8> @test_specify_reg_mf2(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mf2:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v9
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v1, v2
; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -380,11 +382,13 @@ entry:
define <vscale x 8 x i8> @test_specify_reg_m1(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v9
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v1, v2
; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -395,11 +399,13 @@ entry:
define <vscale x 16 x i8> @test_specify_reg_m2(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m2:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v4, v10
; CHECK-NEXT: vmv2r.v v2, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v2, v4
; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -410,6 +416,7 @@ entry:
define <vscale x 1 x i1> @test_specify_reg_mask(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mask:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v8
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: #APP
diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index 8925a9e0cee321..7b654d37234793 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -5,6 +5,7 @@
define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
@@ -14,6 +15,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
@@ -23,6 +25,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
@@ -32,6 +35,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
@@ -41,6 +45,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
@@ -50,6 +55,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_6:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
@@ -86,6 +92,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 0)
@@ -95,6 +102,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 8)
@@ -104,6 +112,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
@@ -113,6 +122,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
@@ -122,6 +132,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 8)
@@ -131,6 +142,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_12:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 12)
@@ -140,6 +152,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
@@ -149,6 +162,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
@@ -158,6 +172,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_4:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
@@ -167,6 +182,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_6:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
@@ -176,6 +192,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 8)
@@ -185,6 +202,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_10:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 10)
@@ -194,6 +212,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_12:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v14, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 12)
@@ -203,6 +222,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_14:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v15, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 14)
@@ -512,6 +532,7 @@ define <vscale x 2 x i64> @insert_nxv2i64_nxv3i64(<3 x i64> %sv) #0 {
define <vscale x 8 x i32> @insert_insert_combine(<2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: ret
%inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v2i32(<vscale x 4 x i32> undef, <2 x i32> %subvec, i64 0)
@@ -524,6 +545,7 @@ define <vscale x 8 x i32> @insert_insert_combine(<2 x i32> %subvec) {
define <vscale x 8 x i32> @insert_insert_combine2(<vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine2:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: ret
%inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv2i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> %subvec, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
index ffb9bf76fb4fab..8ddbf9de0b6e29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
@@ -55,6 +55,7 @@ declare <vscale x 8 x i64> @llvm.vp.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>,
define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv16i64_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
@@ -70,6 +71,7 @@ define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB4_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
index 9991bbc9725ba3..645af077e1a860 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
@@ -117,6 +117,7 @@ define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x
;
; RV64-i64-LABEL: lrint_nxv16f32:
; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-i64-NEXT: vmv1r.v v24, v0
; RV64-i64-NEXT: csrr a1, vlenb
; RV64-i64-NEXT: srli a2, a1, 3
@@ -132,6 +133,7 @@ define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x
; RV64-i64-NEXT: # %bb.1:
; RV64-i64-NEXT: mv a0, a1
; RV64-i64-NEXT: .LBB4_2:
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-i64-NEXT: vmv1r.v v0, v24
; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-i64-NEXT: vfwcvt.x.f.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index 420597b009f33f..d06c809833f81a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1288,6 +1288,7 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -1312,6 +1313,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -1443,6 +1445,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index a3eddbcc2baed4..ec774b6f641b43 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -221,11 +221,13 @@ define <vscale x 4 x i8> @mgather_truemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vsc
define <vscale x 4 x i8> @mgather_falsemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i8:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i8:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i8> %passthru)
@@ -442,11 +444,13 @@ define <vscale x 4 x i16> @mgather_truemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i16> @mgather_falsemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i16:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i16:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i16> %passthru)
@@ -686,11 +690,13 @@ define <vscale x 4 x i32> @mgather_truemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i32> @mgather_falsemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i32:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i32:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %passthru)
@@ -949,6 +955,7 @@ define <vscale x 4 x i64> @mgather_truemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i64> @mgather_falsemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i64> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i64> %passthru)
@@ -1232,6 +1239,7 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: addi a3, sp, 16
; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v16, v8
; RV64-NEXT: vl8re64.v v24, (a0)
; RV64-NEXT: csrr a0, vlenb
@@ -1348,11 +1356,13 @@ define <vscale x 4 x bfloat> @mgather_truemask_nxv4bf16(<vscale x 4 x ptr> %ptrs
define <vscale x 4 x bfloat> @mgather_falsemask_nxv4bf16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x bfloat> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4bf16:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4bf16:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x bfloat> %passthru)
@@ -1549,11 +1559,13 @@ define <vscale x 4 x half> @mgather_truemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <
define <vscale x 4 x half> @mgather_falsemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f16:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f16:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x half> %passthru)
@@ -1749,11 +1761,13 @@ define <vscale x 4 x float> @mgather_truemask_nxv4f32(<vscale x 4 x ptr> %ptrs,
define <vscale x 4 x float> @mgather_falsemask_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f32:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f32:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> %passthru)
@@ -2012,6 +2026,7 @@ define <vscale x 4 x double> @mgather_truemask_nxv4f64(<vscale x 4 x ptr> %ptrs,
define <vscale x 4 x double> @mgather_falsemask_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x double> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x double> %passthru)
@@ -2317,6 +2332,7 @@ define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8>
;
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v16, v0
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 72c251ce985cbf..0ad841f0ebf869 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -2009,6 +2009,7 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v16, v8
; RV32-NEXT: vl4re16.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index c49a55319a3c41..185a91271992e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -1169,6 +1169,7 @@ define <vscale x 32 x i8> @reverse_nxv32i8(<vscale x 32 x i8> %a) {
define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV32-BITS-UNKNOWN: # %bb.0:
+; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8
; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
@@ -1188,6 +1189,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV32-BITS-256-LABEL: reverse_nxv64i8:
; RV32-BITS-256: # %bb.0:
+; RV32-BITS-256-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-BITS-256-NEXT: vmv8r.v v16, v8
; RV32-BITS-256-NEXT: csrr a0, vlenb
; RV32-BITS-256-NEXT: addi a0, a0, -1
@@ -1206,6 +1208,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV32-BITS-512-LABEL: reverse_nxv64i8:
; RV32-BITS-512: # %bb.0:
+; RV32-BITS-512-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-BITS-512-NEXT: vmv8r.v v16, v8
; RV32-BITS-512-NEXT: csrr a0, vlenb
; RV32-BITS-512-NEXT: addi a0, a0, -1
@@ -1224,6 +1227,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV64-BITS-UNKNOWN: # %bb.0:
+; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8
; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
@@ -1243,6 +1247,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-256-LABEL: reverse_nxv64i8:
; RV64-BITS-256: # %bb.0:
+; RV64-BITS-256-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-BITS-256-NEXT: vmv8r.v v16, v8
; RV64-BITS-256-NEXT: csrr a0, vlenb
; RV64-BITS-256-NEXT: addi a0, a0, -1
@@ -1261,6 +1266,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-512-LABEL: reverse_nxv64i8:
; RV64-BITS-512: # %bb.0:
+; RV64-BITS-512-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-BITS-512-NEXT: vmv8r.v v16, v8
; RV64-BITS-512-NEXT: csrr a0, vlenb
; RV64-BITS-512-NEXT: addi a0, a0, -1
@@ -1367,6 +1373,7 @@ define <vscale x 16 x i16> @reverse_nxv16i16(<vscale x 16 x i16> %a) {
define <vscale x 32 x i16> @reverse_nxv32i16(<vscale x 32 x i16> %a) {
; CHECK-LABEL: reverse_nxv32i16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1458,6 +1465,7 @@ define <vscale x 8 x i32> @reverse_nxv8i32(<vscale x 8 x i32> %a) {
define <vscale x 16 x i32> @reverse_nxv16i32(<vscale x 16 x i32> %a) {
; CHECK-LABEL: reverse_nxv16i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
@@ -1533,6 +1541,7 @@ define <vscale x 4 x i64> @reverse_nxv4i64(<vscale x 4 x i64> %a) {
define <vscale x 8 x i64> @reverse_nxv8i64(<vscale x 8 x i64> %a) {
; CHECK-LABEL: reverse_nxv8i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
@@ -1644,6 +1653,7 @@ define <vscale x 16 x bfloat> @reverse_nxv16bf16(<vscale x 16 x bfloat> %a) {
define <vscale x 32 x bfloat> @reverse_nxv32bf16(<vscale x 32 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv32bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1751,6 +1761,7 @@ define <vscale x 16 x half> @reverse_nxv16f16(<vscale x 16 x half> %a) {
define <vscale x 32 x half> @reverse_nxv32f16(<vscale x 32 x half> %a) {
; CHECK-LABEL: reverse_nxv32f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1842,6 +1853,7 @@ define <vscale x 8 x float> @reverse_nxv8f32(<vscale x 8 x float> %a) {
define <vscale x 16 x float> @reverse_nxv16f32(<vscale x 16 x float> %a) {
; CHECK-LABEL: reverse_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
@@ -1917,6 +1929,7 @@ define <vscale x 4 x double> @reverse_nxv4f64(<vscale x 4 x double> %a) {
define <vscale x 8 x double> @reverse_nxv8f64(<vscale x 8 x double> %a) {
; CHECK-LABEL: reverse_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 94fce80ad3b8e4..3d1696c7c6afa9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -117,6 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.nearbyint.nxv4bf16(<vscale x 4 x bfloat>,
define <vscale x 4 x bfloat> @vp_nearbyint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -169,6 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.nearbyint.nxv8bf16(<vscale x 8 x bfloat>,
define <vscale x 8 x bfloat> @vp_nearbyint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -221,6 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.nearbyint.nxv16bf16(<vscale x 16 x bfloa
define <vscale x 16 x bfloat> @vp_nearbyint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -273,6 +276,7 @@ declare <vscale x 32 x bfloat> @llvm.vp.nearbyint.nxv32bf16(<vscale x 32 x bfloa
define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv32bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -566,6 +570,7 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -633,6 +638,7 @@ declare <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -652,6 +658,7 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -719,6 +726,7 @@ declare <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -738,6 +746,7 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -805,6 +814,7 @@ declare <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv32f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -824,6 +834,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv32f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1036,6 +1047,7 @@ declare <vscale x 4 x float> @llvm.vp.nearbyint.nxv4f32(<vscale x 4 x float>, <v
define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1080,6 +1092,7 @@ declare <vscale x 8 x float> @llvm.vp.nearbyint.nxv8f32(<vscale x 8 x float>, <v
define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1124,6 +1137,7 @@ declare <vscale x 16 x float> @llvm.vp.nearbyint.nxv16f32(<vscale x 16 x float>,
define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1210,6 +1224,7 @@ declare <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1254,6 +1269,7 @@ declare <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1298,6 +1314,7 @@ declare <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1342,6 +1359,7 @@ declare <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1387,6 +1405,7 @@ declare <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double
define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
@@ -1415,6 +1434,7 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
index 37c67b9ff2f6af..2fd4233352440a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
@@ -23,6 +23,7 @@ define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: andi sp, sp, -64
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: addi a2, sp, 64
; CHECK-NEXT: slli a1, a1, 3
@@ -53,6 +54,7 @@ define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
define i8 @bar(<vscale x 128 x i1> %x, i64 %y) {
; CHECK-LABEL: bar:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v0, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index 2a69dd31118bd8..85113e77e50304 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -109,6 +109,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.rint.nxv4bf16(<vscale x 4 x bfloat>, <vsc
define <vscale x 4 x bfloat> @vp_rint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -157,6 +158,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.rint.nxv8bf16(<vscale x 8 x bfloat>, <vsc
define <vscale x 8 x bfloat> @vp_rint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -205,6 +207,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.rint.nxv16bf16(<vscale x 16 x bfloat>, <
define <vscale x 16 x bfloat> @vp_rint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv16bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -259,6 +262,7 @@ define <vscale x 32 x bfloat> @vp_rint_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -535,6 +539,7 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
;
; ZVFHMIN-LABEL: vp_rint_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -596,6 +601,7 @@ declare <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -613,6 +619,7 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
;
; ZVFHMIN-LABEL: vp_rint_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -674,6 +681,7 @@ declare <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -691,6 +699,7 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_rint_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -752,6 +761,7 @@ declare <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv32f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -775,6 +785,7 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -978,6 +989,7 @@ declare <vscale x 4 x float> @llvm.vp.rint.nxv4f32(<vscale x 4 x float>, <vscale
define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1018,6 +1030,7 @@ declare <vscale x 8 x float> @llvm.vp.rint.nxv8f32(<vscale x 8 x float>, <vscale
define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1058,6 +1071,7 @@ declare <vscale x 16 x float> @llvm.vp.rint.nxv16f32(<vscale x 16 x float>, <vsc
define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1136,6 +1150,7 @@ declare <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1176,6 +1191,7 @@ declare <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1216,6 +1232,7 @@ declare <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1256,6 +1273,7 @@ declare <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1303,6 +1321,7 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
@@ -1332,6 +1351,7 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index 8a10e75333ad0a..b8a1429191217b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -117,6 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.round.nxv4bf16(<vscale x 4 x bfloat>, <vs
define <vscale x 4 x bfloat> @vp_round_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -169,6 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.round.nxv8bf16(<vscale x 8 x bfloat>, <vs
define <vscale x 8 x bfloat> @vp_round_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -221,6 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.round.nxv16bf16(<vscale x 16 x bfloat>,
define <vscale x 16 x bfloat> @vp_round_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv16bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -279,6 +282,7 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -582,6 +586,7 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_round_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -649,6 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -668,6 +674,7 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_round_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -735,6 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -754,6 +762,7 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_round_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -821,6 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv32f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -846,6 +856,7 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1068,6 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.round.nxv4f32(<vscale x 4 x float>, <vscal
define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1112,6 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.round.nxv8f32(<vscale x 8 x float>, <vscal
define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1156,6 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.round.nxv16f32(<vscale x 16 x float>, <vs
define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1242,6 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1286,6 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1330,6 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1374,6 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1425,6 +1443,7 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
@@ -1458,6 +1477,7 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 4cd909e4b0a637..6e64c381710487 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -117,6 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.roundeven.nxv4bf16(<vscale x 4 x bfloat>,
define <vscale x 4 x bfloat> @vp_roundeven_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -169,6 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.roundeven.nxv8bf16(<vscale x 8 x bfloat>,
define <vscale x 8 x bfloat> @vp_roundeven_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -221,6 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.roundeven.nxv16bf16(<vscale x 16 x bfloa
define <vscale x 16 x bfloat> @vp_roundeven_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -279,6 +282,7 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16(<vscale x 32 x bfloat> %va
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -582,6 +586,7 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_roundeven_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -649,6 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -668,6 +674,7 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_roundeven_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -735,6 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -754,6 +762,7 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_roundeven_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -821,6 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -846,6 +856,7 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1068,6 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float>, <v
define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1112,6 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float>, <v
define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1156,6 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float>,
define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1242,6 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1286,6 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1330,6 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1374,6 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1425,6 +1443,7 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
@@ -1458,6 +1477,7 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 96c821a76ae84e..acbb1abceffb60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -117,6 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.roundtozero.nxv4bf16(<vscale x 4 x bfloat
define <vscale x 4 x bfloat> @vp_roundtozero_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -169,6 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.roundtozero.nxv8bf16(<vscale x 8 x bfloat
define <vscale x 8 x bfloat> @vp_roundtozero_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -221,6 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.roundtozero.nxv16bf16(<vscale x 16 x bfl
define <vscale x 16 x bfloat> @vp_roundtozero_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv16bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -279,6 +282,7 @@ define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16(<vscale x 32 x bfloat> %
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -582,6 +586,7 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv4f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -649,6 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half>, <v
define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv8f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -668,6 +674,7 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv8f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -735,6 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half>,
define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv16f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -754,6 +762,7 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -821,6 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half>,
define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv32f16:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -846,6 +856,7 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1068,6 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.roundtozero.nxv4f32(<vscale x 4 x float>,
define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1112,6 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.roundtozero.nxv8f32(<vscale x 8 x float>,
define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1156,6 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.roundtozero.nxv16f32(<vscale x 16 x float
define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1242,6 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double>
define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1286,6 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double>
define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1330,6 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double>
define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv7f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1374,6 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double>
define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1425,6 +1443,7 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
@@ -1458,6 +1477,7 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index aef160049106b9..cee838aa92585a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -17,6 +17,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: slli a1, a1, 1
; SPILL-O0-NEXT: sub sp, sp, a1
; SPILL-O0-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; SPILL-O0-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; SPILL-O0-NEXT: vmv1r.v v10, v9
; SPILL-O0-NEXT: vmv1r.v v9, v8
; SPILL-O0-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index c7c44fb0e12158..e4d907f2af568e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -20,6 +20,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: slli a1, a1, 1
; SPILL-O0-NEXT: sub sp, sp, a1
; SPILL-O0-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; SPILL-O0-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; SPILL-O0-NEXT: vmv1r.v v10, v9
; SPILL-O0-NEXT: vmv1r.v v9, v8
; SPILL-O0-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
index b27ba14e85c839..de64b8e94332a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -47,6 +47,7 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
; CHECK-NEXT: vs8r.v v8, (t1)
; CHECK-NEXT: sd t1, 0(sp)
; CHECK-NEXT: sd t0, 8(sp)
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: call bar
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 23ebfade6f6b0f..98a92ae551f6de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -941,6 +941,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
define <vscale x 2 x i32> @vredsum(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl) {
; CHECK-LABEL: vredsum:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vredsum.vs v11, v9, v10
@@ -965,6 +966,7 @@ define <vscale x 2 x float> @vfredusum(<vscale x 2 x float> %passthru, <vscale x
; CHECK-LABEL: vfredusum:
; CHECK: # %bb.0:
; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vfredusum.vs v11, v9, v10
@@ -1016,6 +1018,7 @@ define <vscale x 2 x float> @vfredusum_allones_mask(<vscale x 2 x float> %passth
define <vscale x 2 x i32> @unfoldable_vredsum_allones_mask_diff_vl(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: unfoldable_vredsum_allones_mask_diff_vl:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT: vredsum.vs v11, v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 6c11e9413525e0..680f4c6e34b30e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -1473,6 +1473,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: mv a3, a1
@@ -1542,6 +1543,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a6, a4
; CHECK-NEXT: .LBB85_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a7, a0, 3
@@ -1614,6 +1616,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a4
; CHECK-NEXT: .LBB85_6:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli a4, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16
@@ -3720,6 +3723,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: slli a1, a1, 4
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: slli a1, a1, 3
@@ -3747,6 +3751,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a2, a3
; ZVFH-NEXT: .LBB171_2:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -3781,6 +3786,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a1, a1, a3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v24, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: mv a3, a1
@@ -3850,6 +3856,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a6, a4
; ZVFHMIN-NEXT: .LBB171_2:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v25
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a7, a0, 3
@@ -3922,6 +3929,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: # %bb.5:
; ZVFHMIN-NEXT: mv a2, a4
; ZVFHMIN-NEXT: .LBB171_6:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index e8099c2d08a5f8..09cc0761729b69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1092,6 +1092,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1117,6 +1118,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB96_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -1143,6 +1145,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -1158,6 +1161,7 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB97_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -1173,6 +1177,7 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -1188,6 +1193,7 @@ define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB98_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -2244,6 +2250,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -2271,6 +2278,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB189_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -2299,6 +2307,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: srli a2, a3, 2
@@ -2315,6 +2324,7 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB190_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -2332,6 +2342,7 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: srli a2, a3, 2
@@ -2348,6 +2359,7 @@ define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB191_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index c91b02e8f15e47..56e2a940f388d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -4865,6 +4865,7 @@ declare <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32>, <4 x i32>, metadata, <4 x i1>, i
define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_icmp:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
@@ -4906,6 +4907,7 @@ declare <4 x i1> @llvm.vp.fcmp.v4f32(<4 x float>, <4 x float>, metadata, <4 x i1
define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fcmp:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index f8315de324e42b..bcd5c919418f11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -663,6 +663,7 @@ declare <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(
define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_nxv16f64:
; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v9, v0
; CHECK-RV32-NEXT: csrr a4, vlenb
; CHECK-RV32-NEXT: sub a2, a3, a4
@@ -688,6 +689,7 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
;
; CHECK-RV64-LABEL: strided_load_nxv16f64:
; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v9, v0
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: sub a3, a2, a4
@@ -765,6 +767,7 @@ declare <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i6
define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 zeroext %evl, ptr %hi_ptr) {
; CHECK-RV32-LABEL: strided_load_nxv17f64:
; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: csrr a2, vlenb
; CHECK-RV32-NEXT: slli a7, a2, 1
@@ -812,6 +815,7 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
;
; CHECK-RV64-LABEL: strided_load_nxv17f64:
; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: slli a7, a4, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index 98ec99bcfea33e..a71a2de34586d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -615,6 +615,7 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: slli a4, a4, 3
; CHECK-NEXT: sub sp, sp, a4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a4, sp, 16
; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
@@ -630,6 +631,7 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a7, a4
; CHECK-NEXT: .LBB48_4:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
index ab13c78da05e87..418e44c213a0e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -158,6 +158,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i
define void @repeat_shuffle(<2 x double> %v, ptr noalias %q) {
; CHECK-LABEL: repeat_shuffle:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vslideup.vi v10, v8, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index ebd550013ec78f..dfa1712dee10cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -565,6 +565,7 @@ declare <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8>, <vscale x
define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -580,6 +581,7 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
@@ -1343,6 +1345,7 @@ declare <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32>, <vscale x
define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -1359,6 +1362,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
@@ -1399,6 +1403,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a2, a0, 2
@@ -1415,6 +1420,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB120_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
index e59a9174b03d94..e26e4f8227974f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
@@ -43,6 +43,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -97,6 +98,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -137,6 +139,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -177,6 +180,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -217,6 +221,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -257,6 +262,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -297,6 +303,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1(
define iXLen @intrinsic_vcpop_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index 54d2f3f68989bf..f4617a8b0faab3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -7,6 +7,7 @@
define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
; CHECK-LABEL: vector_deinterleave_v16i1_v32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index f20a90a4223139..b4bb23566fe289 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -131,6 +131,7 @@ ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
define {<vscale x 64 x i8>, <vscale x 64 x i8>} @vector_deinterleave_nxv64i8_nxv128i8(<vscale x 128 x i8> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv64i8_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -146,6 +147,7 @@ ret {<vscale x 64 x i8>, <vscale x 64 x i8>} %retval
define {<vscale x 32 x i16>, <vscale x 32 x i16>} @vector_deinterleave_nxv32i16_nxv64i16(<vscale x 64 x i16> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32i16_nxv64i16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -161,6 +163,7 @@ ret {<vscale x 32 x i16>, <vscale x 32 x i16>} %retval
define {<vscale x 16 x i32>, <vscale x 16 x i32>} @vector_deinterleave_nxv16i32_nxvv32i32(<vscale x 32 x i32> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv16i32_nxvv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -189,6 +192,7 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
@@ -387,6 +391,7 @@ declare {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave
define {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} @vector_deinterleave_nxv32bf16_nxv64bf16(<vscale x 64 x bfloat> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32bf16_nxv64bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -402,6 +407,7 @@ ret {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} %retval
define {<vscale x 32 x half>, <vscale x 32 x half>} @vector_deinterleave_nxv32f16_nxv64f16(<vscale x 64 x half> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32f16_nxv64f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -417,6 +423,7 @@ ret {<vscale x 32 x half>, <vscale x 32 x half>} %retval
define {<vscale x 16 x float>, <vscale x 16 x float>} @vector_deinterleave_nxv16f32_nxv32f32(<vscale x 32 x float> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv16f32_nxv32f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -445,6 +452,7 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index 7b0ac01918b9bd..b363f55b4f45a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -91,6 +91,7 @@ define <8 x i32> @vector_interleave_v8i32_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: vector_interleave_v4i64_v2i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: lui a0, 12304
; CHECK-NEXT: addi a0, a0, 512
@@ -106,6 +107,7 @@ define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; ZVBB-LABEL: vector_interleave_v4i64_v2i64:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v10, v9
; ZVBB-NEXT: lui a0, 12304
; ZVBB-NEXT: addi a0, a0, 512
@@ -239,6 +241,7 @@ define <8 x float> @vector_interleave_v8f32_v4f32(<4 x float> %a, <4 x float> %b
define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: vector_interleave_v4f64_v2f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: lui a0, 12304
; CHECK-NEXT: addi a0, a0, 512
@@ -254,6 +257,7 @@ define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double>
;
; ZVBB-LABEL: vector_interleave_v4f64_v2f64:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v10, v9
; ZVBB-NEXT: lui a0, 12304
; ZVBB-NEXT: addi a0, a0, 512
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index bc203e215d8786..99a49786fda1e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -9,6 +9,7 @@
define void @vector_interleave_store_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv32i1_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 26e9afcb1d109b..72fa511e672bf2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -11,6 +11,7 @@
define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -32,6 +33,7 @@ define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1>
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v9, v0
; ZVBB-NEXT: vmv1r.v v0, v8
; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, mu
@@ -160,6 +162,7 @@ declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>,
define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
@@ -203,6 +206,7 @@ define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1
define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -215,6 +219,7 @@ define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 8
@@ -230,6 +235,7 @@ define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8
define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -242,6 +248,7 @@ define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -257,6 +264,7 @@ define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i
define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -269,6 +277,7 @@ define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -575,6 +584,7 @@ declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x dou
define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
; CHECK-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -587,6 +597,7 @@ define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 3
;
; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -602,6 +613,7 @@ define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 3
define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -614,6 +626,7 @@ define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x
;
; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -629,6 +642,7 @@ define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x
define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -641,6 +655,7 @@ define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
index 79bd60d1702f32..b58ac4d0520648 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -120,6 +120,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
@@ -152,6 +153,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru_negative:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
@@ -183,6 +185,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
; CHECK-LABEL: vadd_vv_mask:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
@@ -218,6 +221,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m, <vscale x 1 x i1> %m2) nounwind {
; CHECK-LABEL: vadd_vv_mask_negative:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v11, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index 6a72043ca7e8e6..7c79abb0e80987 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -11,6 +11,7 @@ declare <vscale x 1 x i1> @llvm.vector.splice.nxv1i1(<vscale x 1 x i1>, <vscale
define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv1i1_offset_negone:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
@@ -33,6 +34,7 @@ define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vsc
define <vscale x 1 x i1> @splice_nxv1i1_offset_max(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv1i1_offset_max:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
@@ -59,6 +61,7 @@ declare <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale
define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_offset_negone:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
@@ -81,6 +84,7 @@ define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vsc
define <vscale x 2 x i1> @splice_nxv2i1_offset_max(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_offset_max:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
@@ -107,6 +111,7 @@ declare <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale
define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_offset_negone:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
@@ -129,6 +134,7 @@ define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vsc
define <vscale x 4 x i1> @splice_nxv4i1_offset_max(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_offset_max:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
@@ -155,6 +161,7 @@ declare <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale
define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_offset_negone:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
@@ -176,6 +183,7 @@ define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vsc
define <vscale x 8 x i1> @splice_nxv8i1_offset_max(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_offset_max:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
@@ -201,6 +209,7 @@ declare <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1>, <vsca
define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_offset_negone:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -223,6 +232,7 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <
define <vscale x 16 x i1> @splice_nxv16i1_offset_max(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_offset_max:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -249,6 +259,7 @@ declare <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1>, <vsca
define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv32i1_offset_negone:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
@@ -296,6 +307,7 @@ declare <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1>, <vsca
define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv64i1_offset_negone:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index 2c92a5da8eecb7..99dd5331c78404 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -462,6 +462,7 @@ declare <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
@@ -477,6 +478,7 @@ define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 1953cfd2a0169f..67d28dc3369f32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -411,6 +411,7 @@ define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -518,6 +519,7 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -604,6 +606,7 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1205,6 +1208,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1324,6 +1328,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1416,6 +1421,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index ccd286b7ee5fd3..e4f2f21cf14d96 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -373,6 +373,7 @@ define <vscale x 32 x bfloat> @vfdiv_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -480,6 +481,7 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -566,6 +568,7 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1117,6 +1120,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1236,6 +1240,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1328,6 +1333,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
index eafd605c6110eb..a04904141ba458 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
@@ -43,6 +43,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1(
define iXLen @intrinsic_vfirst_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -97,6 +98,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1(
define iXLen @intrinsic_vfirst_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -137,6 +139,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1(
define iXLen @intrinsic_vfirst_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -177,6 +180,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1(
define iXLen @intrinsic_vfirst_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -217,6 +221,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1(
define iXLen @intrinsic_vfirst_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -257,6 +262,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1(
define iXLen @intrinsic_vfirst_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -297,6 +303,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1(
define iXLen @intrinsic_vfirst_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index fd518d9be786de..c082497f8115c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -628,6 +628,7 @@ define <vscale x 32 x bfloat> @vfma_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
; CHECK-NEXT: add a2, a2, a3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vl8re16.v v0, (a0)
; CHECK-NEXT: csrr a2, vlenb
@@ -2193,6 +2194,7 @@ define <vscale x 32 x half> @vfma_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v24, v0
; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -3662,6 +3664,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -3746,6 +3749,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB128_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
@@ -7796,6 +7800,7 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vfnmadd_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -8152,6 +8157,7 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_commute(<vscale x 16
;
; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_neg_splat_commute:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -8253,6 +8259,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -8553,6 +8560,7 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat(<vscale x 16 x half>
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -8712,6 +8720,7 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
; ZVFHMIN-NEXT: lui a2, 8
@@ -9020,6 +9029,7 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, half %
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a3, a2
; ZVFHMIN-NEXT: .LBB282_2:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: addi a4, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
@@ -9277,6 +9287,7 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a1, 8
@@ -10001,6 +10012,7 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
@@ -11154,6 +11166,7 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
@@ -11766,6 +11779,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
@@ -11924,6 +11938,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a3, 8
@@ -12075,6 +12090,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a3, 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index 1d471ab2404b17..f6ca80716ad7cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -227,6 +227,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v0, v16
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -314,6 +315,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -664,6 +666,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -757,6 +760,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 88fd81a5a2f7bc..35b23f0af8bd57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -226,6 +226,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFH-NEXT: slli a1, a1, 5
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v0, v16
; ZVFH-NEXT: addi a1, sp, 16
; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -316,6 +317,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -402,6 +404,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFH-NEXT: slli a0, a0, 5
; ZVFH-NEXT: sub sp, sp, a0
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v0, v16
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
@@ -498,6 +501,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
@@ -875,6 +879,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -967,6 +972,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index dafcf8a1410d32..12a4c7d82af10a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -183,6 +183,7 @@ define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -516,6 +517,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index b3df6572f79369..ec1e2e42eb0cdb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -183,6 +183,7 @@ define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -516,6 +517,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index f4a236df4c9e4f..c5665554f0d167 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -495,6 +495,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -614,6 +615,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -706,6 +708,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index d1702268f829fa..3d6d33f06c17bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1112,6 +1112,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
@@ -1186,6 +1187,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index 343098e87649ea..cdddc44d92ab4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -450,6 +450,7 @@ declare <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
@@ -465,6 +466,7 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index 3705e73fda492e..fdfdf06a340769 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -329,6 +329,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: li a2, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index 80edf0e3a4d811..3c31c8414a6712 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -310,6 +310,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 4
@@ -389,6 +390,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
index 341fe678183b6f..27dd928107e9c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -96,6 +96,7 @@ declare <vscale x 32 x float> @llvm.vp.fpext.nxv32f32.nxv32f16(<vscale x 32 x ha
define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_nxv32f16_nxv32f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -112,6 +113,7 @@ define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index cf195c7c0935e4..a9c0d7f4072396 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -508,6 +508,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -524,6 +525,7 @@ define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB34_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v8, v0.t
@@ -538,6 +540,7 @@ declare <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -554,6 +557,7 @@ define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index 952d28604b86c6..be75707449ac10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -508,6 +508,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -524,6 +525,7 @@ define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB34_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v24, v8, v0.t
@@ -538,6 +540,7 @@ declare <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -554,6 +557,7 @@ define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 874813f0575953..d2dc927182cc86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -102,6 +102,7 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -121,6 +122,7 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
@@ -147,6 +149,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -181,6 +184,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB8_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
@@ -194,6 +198,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
@@ -203,6 +208,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB8_6:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index 8edcf23988c7fb..69951bed12a977 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -167,6 +167,7 @@ declare <vscale x 32 x bfloat> @llvm.vp.sqrt.nxv32bf16(<vscale x 32 x bfloat>, <
define <vscale x 32 x bfloat> @vfsqrt_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_nxv32bf16:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a1, a2, 1
@@ -452,6 +453,7 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv32f16:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
@@ -749,6 +751,7 @@ declare <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfsqrt_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_nxv16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
@@ -764,6 +767,7 @@ define <vscale x 16 x double> @vfsqrt_vv_nxv16f64(<vscale x 16 x double> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index 25a80e66c4a527..a9ee0983f1a331 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -373,6 +373,7 @@ define <vscale x 32 x bfloat> @vfsub_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -480,6 +481,7 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -566,6 +568,7 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1117,6 +1120,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1236,6 +1240,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1328,6 +1333,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
index 1a1472fcfc66f5..41ec373ab09d5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
@@ -111,6 +111,7 @@ define <vscale x 4 x i32> @different_vl_with_ta(<vscale x 4 x i32> %a, <vscale x
define <vscale x 4 x i32> @different_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_vl_with_tu:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
@@ -126,6 +127,7 @@ define <vscale x 4 x i32> @different_vl_with_tu(<vscale x 4 x i32> %passthru, <v
define <vscale x 4 x i32> @different_imm_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_imm_vl_with_tu:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v10
; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
index 1516d656663b6b..a4aae25f93c1d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
@@ -51,6 +51,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
index b89097b8ff9744..5d9ba18deb5024 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
@@ -25,6 +25,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -64,6 +65,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -103,6 +105,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -142,6 +145,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -181,6 +185,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -220,6 +225,7 @@ entry:
define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 32 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -259,6 +265,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -299,6 +306,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -339,6 +347,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -379,6 +388,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -419,6 +429,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -459,6 +470,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -500,6 +512,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -541,6 +554,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -582,6 +596,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -623,6 +638,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -664,6 +680,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -706,6 +723,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -748,6 +766,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -790,6 +809,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -832,6 +852,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -875,6 +896,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -918,6 +940,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -961,6 +984,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1004,6 +1028,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1048,6 +1073,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1092,6 +1118,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1136,6 +1163,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1180,6 +1208,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1225,6 +1254,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1270,6 +1300,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1315,6 +1346,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1359,6 +1391,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1397,6 +1430,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1435,6 +1469,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1473,6 +1508,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1511,6 +1547,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1549,6 +1586,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1588,6 +1626,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1627,6 +1666,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1666,6 +1706,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1705,6 +1746,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1745,6 +1787,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1785,6 +1828,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1825,6 +1869,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1865,6 +1910,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1906,6 +1952,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1947,6 +1994,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1988,6 +2036,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2030,6 +2079,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2072,6 +2122,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2114,6 +2165,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2157,6 +2209,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2200,6 +2253,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2243,6 +2297,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2287,6 +2342,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2331,6 +2387,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2375,6 +2432,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -2413,6 +2471,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -2451,6 +2510,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -2489,6 +2549,7 @@ entry:
define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -2527,6 +2588,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2566,6 +2628,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2605,6 +2668,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2644,6 +2708,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2684,6 +2749,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2724,6 +2790,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2764,6 +2831,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2805,6 +2873,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2846,6 +2915,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2888,6 +2958,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2930,6 +3001,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2973,6 +3045,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3016,6 +3089,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3060,6 +3134,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3104,6 +3179,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -3142,6 +3218,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -3180,6 +3257,7 @@ entry:
define <vscale x 4 x i64> @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -3218,6 +3296,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3257,6 +3336,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3296,6 +3376,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3336,6 +3417,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3376,6 +3458,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3417,6 +3500,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3459,6 +3543,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3502,6 +3587,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3545,6 +3631,7 @@ entry:
define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -3582,6 +3669,7 @@ entry:
define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -3619,6 +3707,7 @@ entry:
define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -3656,6 +3745,7 @@ entry:
define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -3693,6 +3783,7 @@ entry:
define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -3730,6 +3821,7 @@ entry:
define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3768,6 +3860,7 @@ entry:
define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3806,6 +3899,7 @@ entry:
define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3844,6 +3938,7 @@ entry:
define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3882,6 +3977,7 @@ entry:
define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3921,6 +4017,7 @@ entry:
define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3960,6 +4057,7 @@ entry:
define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3999,6 +4097,7 @@ entry:
define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4038,6 +4137,7 @@ entry:
define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4078,6 +4178,7 @@ entry:
define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4118,6 +4219,7 @@ entry:
define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4158,6 +4260,7 @@ entry:
define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4199,6 +4302,7 @@ entry:
define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4240,6 +4344,7 @@ entry:
define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4281,6 +4386,7 @@ entry:
define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4323,6 +4429,7 @@ entry:
define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4365,6 +4472,7 @@ entry:
define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4407,6 +4515,7 @@ entry:
define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4450,6 +4559,7 @@ entry:
define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4493,6 +4603,7 @@ entry:
define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4536,6 +4647,7 @@ entry:
define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -4573,6 +4685,7 @@ entry:
define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -4610,6 +4723,7 @@ entry:
define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -4647,6 +4761,7 @@ entry:
define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -4684,6 +4799,7 @@ entry:
define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4722,6 +4838,7 @@ entry:
define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4760,6 +4877,7 @@ entry:
define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4798,6 +4916,7 @@ entry:
define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4837,6 +4956,7 @@ entry:
define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4876,6 +4996,7 @@ entry:
define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4915,6 +5036,7 @@ entry:
define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4955,6 +5077,7 @@ entry:
define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4995,6 +5118,7 @@ entry:
define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5036,6 +5160,7 @@ entry:
define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5077,6 +5202,7 @@ entry:
define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5119,6 +5245,7 @@ entry:
define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5161,6 +5288,7 @@ entry:
define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5204,6 +5332,7 @@ entry:
define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5247,6 +5376,7 @@ entry:
define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -5284,6 +5414,7 @@ entry:
define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -5321,6 +5452,7 @@ entry:
define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -5358,6 +5490,7 @@ entry:
define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5396,6 +5529,7 @@ entry:
define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5434,6 +5568,7 @@ entry:
define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5473,6 +5608,7 @@ entry:
define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5512,6 +5648,7 @@ entry:
define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5552,6 +5689,7 @@ entry:
define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5593,6 +5731,7 @@ entry:
define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5635,6 +5774,7 @@ entry:
define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5678,6 +5818,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -5715,6 +5856,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -5752,6 +5894,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -5789,6 +5932,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -5826,6 +5970,7 @@ entry:
define <vscale x 16 x bfloat> @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -5863,6 +6008,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5901,6 +6047,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5939,6 +6086,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5977,6 +6125,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6015,6 +6164,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6054,6 +6204,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6093,6 +6244,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6132,6 +6284,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6171,6 +6324,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6211,6 +6365,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6251,6 +6406,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6291,6 +6447,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6332,6 +6489,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6373,6 +6531,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6414,6 +6573,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6456,6 +6616,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6498,6 +6659,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6540,6 +6702,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6583,6 +6746,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6626,6 +6790,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
index 3dc0db90b6d854..c4c0a2ee101fd0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
@@ -51,6 +51,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
index 68acb3beb06867..1f4c0e3ace3917 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
@@ -25,6 +25,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -64,6 +65,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -103,6 +105,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -142,6 +145,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -181,6 +185,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -220,6 +225,7 @@ entry:
define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 32 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -259,6 +265,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -299,6 +306,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -339,6 +347,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -379,6 +388,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -419,6 +429,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -459,6 +470,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -500,6 +512,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -541,6 +554,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -582,6 +596,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -623,6 +638,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -664,6 +680,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -706,6 +723,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -748,6 +766,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -790,6 +809,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -832,6 +852,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -875,6 +896,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -918,6 +940,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -961,6 +984,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1004,6 +1028,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1048,6 +1073,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1092,6 +1118,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1136,6 +1163,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1180,6 +1208,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1225,6 +1254,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1270,6 +1300,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1315,6 +1346,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1359,6 +1391,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1397,6 +1430,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1435,6 +1469,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1473,6 +1508,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1511,6 +1547,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1549,6 +1586,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1588,6 +1626,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1627,6 +1666,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1666,6 +1706,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1705,6 +1746,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1745,6 +1787,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1785,6 +1828,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1825,6 +1869,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1865,6 +1910,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1906,6 +1952,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1947,6 +1994,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1988,6 +2036,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2030,6 +2079,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2072,6 +2122,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2114,6 +2165,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2157,6 +2209,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2200,6 +2253,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2243,6 +2297,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2287,6 +2342,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2331,6 +2387,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2375,6 +2432,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -2413,6 +2471,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -2451,6 +2510,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -2489,6 +2549,7 @@ entry:
define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -2527,6 +2588,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2566,6 +2628,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2605,6 +2668,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2644,6 +2708,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2684,6 +2749,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2724,6 +2790,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2764,6 +2831,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2805,6 +2873,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2846,6 +2915,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2888,6 +2958,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2930,6 +3001,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2973,6 +3045,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3016,6 +3089,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3060,6 +3134,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3104,6 +3179,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -3142,6 +3218,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -3180,6 +3257,7 @@ entry:
define <vscale x 4 x i64> @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -3218,6 +3296,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3257,6 +3336,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3296,6 +3376,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3336,6 +3417,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3376,6 +3458,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3417,6 +3500,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3459,6 +3543,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3502,6 +3587,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3545,6 +3631,7 @@ entry:
define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -3582,6 +3669,7 @@ entry:
define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -3619,6 +3707,7 @@ entry:
define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -3656,6 +3745,7 @@ entry:
define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -3693,6 +3783,7 @@ entry:
define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -3730,6 +3821,7 @@ entry:
define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3768,6 +3860,7 @@ entry:
define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3806,6 +3899,7 @@ entry:
define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3844,6 +3938,7 @@ entry:
define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3882,6 +3977,7 @@ entry:
define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3921,6 +4017,7 @@ entry:
define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3960,6 +4057,7 @@ entry:
define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3999,6 +4097,7 @@ entry:
define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4038,6 +4137,7 @@ entry:
define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4078,6 +4178,7 @@ entry:
define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4118,6 +4219,7 @@ entry:
define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4158,6 +4260,7 @@ entry:
define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4199,6 +4302,7 @@ entry:
define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4240,6 +4344,7 @@ entry:
define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4281,6 +4386,7 @@ entry:
define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4323,6 +4429,7 @@ entry:
define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4365,6 +4472,7 @@ entry:
define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4407,6 +4515,7 @@ entry:
define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4450,6 +4559,7 @@ entry:
define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4493,6 +4603,7 @@ entry:
define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4536,6 +4647,7 @@ entry:
define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -4573,6 +4685,7 @@ entry:
define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -4610,6 +4723,7 @@ entry:
define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -4647,6 +4761,7 @@ entry:
define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -4684,6 +4799,7 @@ entry:
define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4722,6 +4838,7 @@ entry:
define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4760,6 +4877,7 @@ entry:
define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4798,6 +4916,7 @@ entry:
define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4837,6 +4956,7 @@ entry:
define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4876,6 +4996,7 @@ entry:
define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4915,6 +5036,7 @@ entry:
define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4955,6 +5077,7 @@ entry:
define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4995,6 +5118,7 @@ entry:
define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5036,6 +5160,7 @@ entry:
define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5077,6 +5202,7 @@ entry:
define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5119,6 +5245,7 @@ entry:
define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5161,6 +5288,7 @@ entry:
define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5204,6 +5332,7 @@ entry:
define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5247,6 +5376,7 @@ entry:
define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -5284,6 +5414,7 @@ entry:
define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -5321,6 +5452,7 @@ entry:
define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -5358,6 +5490,7 @@ entry:
define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5396,6 +5529,7 @@ entry:
define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5434,6 +5568,7 @@ entry:
define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5473,6 +5608,7 @@ entry:
define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5512,6 +5648,7 @@ entry:
define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5552,6 +5689,7 @@ entry:
define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5593,6 +5731,7 @@ entry:
define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5635,6 +5774,7 @@ entry:
define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5678,6 +5818,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -5715,6 +5856,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -5752,6 +5894,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -5789,6 +5932,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -5826,6 +5970,7 @@ entry:
define <vscale x 16 x bfloat> @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -5863,6 +6008,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5901,6 +6047,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5939,6 +6086,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5977,6 +6125,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6015,6 +6164,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6054,6 +6204,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6093,6 +6244,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6132,6 +6284,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6171,6 +6324,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6211,6 +6365,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6251,6 +6406,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6291,6 +6447,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6332,6 +6489,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6373,6 +6531,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6414,6 +6573,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6456,6 +6616,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6498,6 +6659,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6540,6 +6702,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6583,6 +6746,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6626,6 +6790,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index 0b553d3cd6fdf4..0fbf710e0d2c6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -412,6 +412,7 @@ declare <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -427,6 +428,7 @@ define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
@@ -974,6 +976,7 @@ declare <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -990,6 +993,7 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
@@ -1034,6 +1038,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
@@ -1050,6 +1055,7 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index f6be882f742062..0b7ffb0cc48039 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -410,6 +410,7 @@ declare <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -425,6 +426,7 @@ define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
@@ -972,6 +974,7 @@ declare <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -988,6 +991,7 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
@@ -1032,6 +1036,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
@@ -1048,6 +1053,7 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
index 9ca78c872befd3..19e7cdaff0ffec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
@@ -658,6 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -705,6 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -752,6 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -799,6 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -846,6 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -893,6 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -940,6 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -987,6 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1034,6 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1081,6 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1128,6 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1175,6 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
index 7cf18a70158124..70dff36191e9e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
@@ -658,6 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -705,6 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -752,6 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -799,6 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -846,6 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -893,6 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -940,6 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -987,6 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1034,6 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1081,6 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1128,6 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1175,6 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
index b78f2da4ae2540..19d0c1e6e0c822 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
@@ -658,6 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -705,6 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -752,6 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -799,6 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -846,6 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -893,6 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -940,6 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -987,6 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1034,6 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1081,6 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1128,6 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1175,6 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
index 940e4d043f63f6..9c5bfdc969f2a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
@@ -658,6 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -705,6 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -752,6 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -799,6 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -846,6 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -893,6 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -940,6 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -987,6 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1034,6 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1081,6 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1128,6 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1175,6 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
index 10ddfb8f014ed7..c7eb5c20b72c9a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
@@ -658,6 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -705,6 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -752,6 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -799,6 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -846,6 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -893,6 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -940,6 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -987,6 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1034,6 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1081,6 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1128,6 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1175,6 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
index 4d8a95de1d3de7..23b5fb53b085b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
@@ -658,6 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -705,6 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -752,6 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -799,6 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -846,6 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -893,6 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -940,6 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -987,6 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1034,6 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1081,6 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1128,6 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1175,6 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 8690014cc2c9df..f797dacc25a3d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -412,6 +412,7 @@ declare <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -427,6 +428,7 @@ define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
@@ -974,6 +976,7 @@ declare <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -990,6 +993,7 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
@@ -1034,6 +1038,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
@@ -1050,6 +1055,7 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 414807829d5630..7fd4125cff843e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -410,6 +410,7 @@ declare <vscale x 128 x i8> @llvm.vp.umin.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -425,6 +426,7 @@ define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
@@ -972,6 +974,7 @@ declare <vscale x 32 x i32> @llvm.vp.umin.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vminu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -988,6 +991,7 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
@@ -1032,6 +1036,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
@@ -1048,6 +1053,7 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
index d1f344d52763db..0be156aad1cb28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
@@ -31,6 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -73,6 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -115,6 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -157,6 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -199,6 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -241,6 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -283,6 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
index c5769e0d1e5192..868273c1ea1b32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
index e6d775dee5b22b..f3071bf7a2302a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -971,6 +971,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1019,6 +1020,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1067,6 +1069,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1087,6 +1090,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8_1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: li a1, 99
; CHECK-NEXT: vmv1r.v v0, v9
@@ -1154,6 +1158,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1202,6 +1207,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1250,6 +1256,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1298,6 +1305,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1394,6 +1403,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1442,6 +1452,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1490,6 +1501,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1538,6 +1550,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1586,6 +1599,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1634,6 +1648,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1682,6 +1697,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1757,6 +1773,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1832,6 +1849,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1907,6 +1925,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1942,6 +1961,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1977,6 +1997,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -2043,6 +2064,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2078,6 +2100,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2113,6 +2136,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2148,6 +2172,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2183,6 +2208,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2218,6 +2244,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2253,6 +2280,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2288,6 +2316,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2323,6 +2352,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2358,6 +2388,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2393,6 +2424,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2428,6 +2460,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2463,6 +2496,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2498,6 +2532,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2533,6 +2568,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2568,6 +2604,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index 68aa912ae42ea4..d09c85c43647fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -971,6 +971,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1019,6 +1020,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1067,6 +1069,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1115,6 +1118,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1163,6 +1167,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1211,6 +1216,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1259,6 +1265,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1307,6 +1314,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1355,6 +1363,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1403,6 +1412,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1451,6 +1461,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1499,6 +1510,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1547,6 +1559,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1595,6 +1608,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1643,6 +1657,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1718,6 +1733,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1793,6 +1809,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1868,6 +1885,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,6 +1921,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1938,6 +1957,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1973,6 +1993,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1993,6 +2014,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: li a1, 99
; CHECK-NEXT: vmv1r.v v0, v9
@@ -2029,6 +2051,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2064,6 +2087,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2099,6 +2123,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2134,6 +2159,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2232,6 +2258,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2267,6 +2294,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2302,6 +2330,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2337,6 +2366,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2372,6 +2402,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2407,6 +2438,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2442,6 +2474,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2477,6 +2510,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2512,6 +2546,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2547,6 +2582,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
index 98a03a2c562806..dd0e73d9bc1acb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
index 69b22573c289e5..f9e4b6aab11b7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
index 1dc52eb55455ba..065aeafd3f97b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
@@ -31,6 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -73,6 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -115,6 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -157,6 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -199,6 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -241,6 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -283,6 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
index c8794e1b63900f..c12cf515d32e9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
index 86dc48d51cc2bc..a516834876a668 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
index 8d57f2adc53868..afef755ddec915 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
index 627b5943087137..beaf809a83ab10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
index 47d1048f46cab4..dc3a50ad7bd687 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
@@ -970,6 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1017,6 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1064,6 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1111,6 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1158,6 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1205,6 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1252,6 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1299,6 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1346,6 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1393,6 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1440,6 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1487,6 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1534,6 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1581,6 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1628,6 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1702,6 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1776,6 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1850,6 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1885,6 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1920,6 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1955,6 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -1990,6 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2025,6 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2060,6 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2095,6 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2130,6 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2165,6 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2200,6 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2235,6 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2270,6 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2305,6 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2340,6 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2375,6 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2410,6 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2445,6 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2480,6 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
index b0a28e6e455b07..38925934db2f28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
@@ -31,6 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -73,6 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -115,6 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -157,6 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -199,6 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -241,6 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -283,6 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 7f248a39b54fa9..33e88360e271c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -49,6 +49,7 @@ define <vscale x 4 x i32> @vadd_same_passthru(<vscale x 4 x i32> %passthru, <vsc
define <vscale x 4 x i32> @unfoldable_diff_avl_unknown(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: unfoldable_diff_avl_unknown:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
index f7ca65801dc874..47d9f3fde7cd06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
@@ -5,6 +5,7 @@
define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v0
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -17,6 +18,7 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
;
; RV64-LABEL: bool_vec:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
@@ -35,6 +37,7 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec_zero_poison:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v0
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -43,6 +46,7 @@ define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m,
;
; RV64-LABEL: bool_vec_zero_poison:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
index c8a048971a803d..912abd6dc36dcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
@@ -12,6 +12,7 @@ define <vscale x 1 x i64> @all_ones(<vscale x 1 x i64> %true, <vscale x 1 x i64>
define <vscale x 1 x i64> @all_zeroes(<vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl) {
; CHECK-LABEL: all_zeroes:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> splat (i1 false), <vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index 2a137099bcb0f4..6839bb647b5358 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -10,6 +10,7 @@ declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 x i1>, i32,
define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -34,6 +35,7 @@ define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %ev
define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -58,6 +60,7 @@ define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb,
define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -83,6 +86,7 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1>
define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -107,6 +111,7 @@ define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %ev
define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -131,6 +136,7 @@ define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb,
define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -156,6 +162,7 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1>
define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -180,6 +187,7 @@ define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %ev
define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -204,6 +212,7 @@ define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb,
define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -229,6 +238,7 @@ define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1>
define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -253,6 +263,7 @@ define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext
define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -277,6 +288,7 @@ define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1>
define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index fc446d0a3a88ac..9a8c90efe526ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -13,6 +13,7 @@ declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i
define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -37,6 +38,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -61,6 +63,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -86,6 +89,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -110,6 +114,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -134,6 +139,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -159,6 +165,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -183,6 +190,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -207,6 +215,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -232,6 +241,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -256,6 +266,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -280,6 +291,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -305,6 +317,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -329,6 +342,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -353,6 +367,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -379,6 +394,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -403,6 +419,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -427,6 +444,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -453,6 +471,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -477,6 +496,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -501,6 +521,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index 3e423c8ec99030..b13f146134ecb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -258,6 +258,7 @@ declare <vscale x 32 x i8> @llvm.vp.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr>,
define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8> %idxs, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv32i8:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a2, a3, 1
@@ -285,6 +286,7 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
;
; RV64-LABEL: vpgather_baseidx_nxv32i8:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: slli a3, a2, 1
@@ -2457,6 +2459,7 @@ declare <vscale x 16 x double> @llvm.vp.gather.nxv16f64.nxv16p0(<vscale x 16 x p
define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv16f64:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: sub a2, a0, a1
@@ -2472,6 +2475,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB111_2:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
@@ -2480,6 +2484,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
;
; RV64-LABEL: vpgather_nxv16f64:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: sub a2, a0, a1
@@ -2495,6 +2500,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB111_2:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
@@ -2506,6 +2512,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v16, v8
@@ -2524,6 +2531,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB112_2:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2531,6 +2539,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
;
; RV64-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
@@ -2552,6 +2561,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB112_2:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
@@ -2564,6 +2574,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v16, v8
@@ -2582,6 +2593,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB113_2:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2589,6 +2601,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
;
; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
@@ -2610,6 +2623,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB113_2:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
@@ -2623,6 +2637,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vzext.vf2 v16, v8
@@ -2641,6 +2656,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB114_2:
+; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2648,6 +2664,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
;
; RV64-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
@@ -2666,6 +2683,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB114_2:
+; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v24, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index bd7ea6c19d0b30..c0848fb25a6a9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -522,6 +522,7 @@ declare <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr, <vscale x 16 x i1>
define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv16f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: sub a3, a1, a2
@@ -539,6 +540,7 @@ define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
@@ -561,6 +563,7 @@ declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x doub
define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv17f64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a5, a3, 1
@@ -599,6 +602,7 @@ define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB45_6:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index f029d0b1b01bc0..df238f2f01b5a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -361,6 +361,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: addi a1, sp, 16
@@ -382,6 +383,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB28_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
@@ -401,6 +403,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vx_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -416,6 +419,7 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
@@ -429,6 +433,7 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vi_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -444,6 +449,7 @@ define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB30_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vim v8, v8, 2, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index 8978dc268d4e52..c3c259e10172d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -468,6 +468,7 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: slli a3, a3, 3
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
@@ -483,6 +484,7 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a6, a3
; CHECK-NEXT: .LBB36_4:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 39666bb6119a0f..31d76699b17d0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -23,6 +23,7 @@ declare i1 @llvm.vp.reduce.or.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i
define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -39,6 +40,7 @@ declare i1 @llvm.vp.reduce.xor.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -71,6 +73,7 @@ declare i1 @llvm.vp.reduce.or.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i
define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -87,6 +90,7 @@ declare i1 @llvm.vp.reduce.xor.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -119,6 +123,7 @@ declare i1 @llvm.vp.reduce.or.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i
define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -135,6 +140,7 @@ declare i1 @llvm.vp.reduce.xor.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -167,6 +173,7 @@ declare i1 @llvm.vp.reduce.or.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i
define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -183,6 +190,7 @@ declare i1 @llvm.vp.reduce.xor.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -215,6 +223,7 @@ declare i1 @llvm.vp.reduce.or.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>
define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -231,6 +240,7 @@ declare i1 @llvm.vp.reduce.xor.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1
define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -263,6 +273,7 @@ declare i1 @llvm.vp.reduce.or.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>
define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -279,6 +290,7 @@ declare i1 @llvm.vp.reduce.xor.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1
define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -295,6 +307,7 @@ declare i1 @llvm.vp.reduce.or.nxv40i1(i1, <vscale x 40 x i1>, <vscale x 40 x i1>
define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, <vscale x 40 x i1> %v, <vscale x 40 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv40i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -327,6 +340,7 @@ declare i1 @llvm.vp.reduce.or.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>
define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -343,6 +357,7 @@ declare i1 @llvm.vp.reduce.xor.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1
define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -359,27 +374,29 @@ declare i1 @llvm.vp.reduce.or.nxv128i1(i1, <vscale x 128 x i1>, <vscale x 128 x
define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, <vscale x 128 x i1> %v, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv128i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub a3, a1, a2
-; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a2, a1, a3
+; CHECK-NEXT: sltu a4, a1, a2
; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vcpop.m a3, v8, v0.t
-; CHECK-NEXT: snez a3, a3
-; CHECK-NEXT: bltu a1, a2, .LBB22_2
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vcpop.m a2, v8, v0.t
+; CHECK-NEXT: snez a2, a2
+; CHECK-NEXT: bltu a1, a3, .LBB22_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB22_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v11, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
-; CHECK-NEXT: or a0, a3, a0
+; CHECK-NEXT: or a0, a2, a0
; CHECK-NEXT: ret
%r = call i1 @llvm.vp.reduce.or.nxv128i1(i1 %s, <vscale x 128 x i1> %v, <vscale x 128 x i1> %m, i32 %evl)
ret i1 %r
@@ -390,6 +407,7 @@ declare i1 @llvm.vp.reduce.add.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -406,6 +424,7 @@ declare i1 @llvm.vp.reduce.add.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -422,6 +441,7 @@ declare i1 @llvm.vp.reduce.add.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -438,6 +458,7 @@ declare i1 @llvm.vp.reduce.add.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -454,6 +475,7 @@ declare i1 @llvm.vp.reduce.add.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1
define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -470,6 +492,7 @@ declare i1 @llvm.vp.reduce.add.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1
define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -486,6 +509,7 @@ declare i1 @llvm.vp.reduce.add.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1
define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -615,6 +639,7 @@ declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -631,6 +656,7 @@ declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -647,6 +673,7 @@ declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -663,6 +690,7 @@ declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -679,6 +707,7 @@ declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i
define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -695,6 +724,7 @@ declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i
define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -711,6 +741,7 @@ declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i
define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -727,6 +758,7 @@ declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -743,6 +775,7 @@ declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv2i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -759,6 +792,7 @@ declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv4i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -775,6 +809,7 @@ declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv8i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -791,6 +826,7 @@ declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i
define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv16i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -807,6 +843,7 @@ declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i
define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv32i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -823,6 +860,7 @@ declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i
define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv64i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
index 1779fc12095e88..9fc86aee775319 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -22,6 +22,7 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
; NOSUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
; NOSUBREG-NEXT: # =>This Inner Loop Header: Depth=1
; NOSUBREG-NEXT: vl1r.v v9, (zero)
+; NOSUBREG-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; NOSUBREG-NEXT: vmv1r.v v13, v12
; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; NOSUBREG-NEXT: vrgatherei16.vv v13, v9, v10
@@ -42,6 +43,7 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
; SUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
; SUBREG-NEXT: # =>This Inner Loop Header: Depth=1
; SUBREG-NEXT: vl1r.v v9, (zero)
+; SUBREG-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; SUBREG-NEXT: vmv1r.v v13, v12
; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; SUBREG-NEXT: vrgatherei16.vv v13, v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index 12c439346e3569..b483748064cdae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -572,6 +572,7 @@ declare <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -587,6 +588,7 @@ define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
@@ -1350,6 +1352,7 @@ declare <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -1366,6 +1369,7 @@ define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index d962f703abfd22..3bb4ae1ec1e723 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -571,6 +571,7 @@ declare <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -586,6 +587,7 @@ define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
@@ -1349,6 +1351,7 @@ declare <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -1365,6 +1368,7 @@ define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
index a63d14e8b6c04e..d176ccf6fabdad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
@@ -126,6 +126,7 @@ define <vscale x 8 x bfloat> @vmerge_truelhs_nxv8bf16_0(<vscale x 8 x bfloat> %v
define <vscale x 8 x bfloat> @vmerge_falselhs_nxv8bf16_0(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8bf16_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 1fc33dc73a27dc..6b9c5fe7b72189 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -175,6 +175,7 @@ define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <v
define <vscale x 8 x half> @vmerge_falselhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
index 9cafa28eb429f1..a0eab1c61709e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
@@ -803,6 +803,7 @@ define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vmerge_falselhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index bb51f0592dc17a..2c1d6691fb83c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -362,6 +362,7 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a4, a3, 3
@@ -384,6 +385,7 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB27_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -421,6 +423,7 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a3, a1, 3
@@ -443,6 +446,7 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB28_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -710,6 +714,7 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a3, a1, 3
@@ -731,6 +736,7 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB48_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -835,6 +841,7 @@ define <vscale x 2 x i1> @select_cond_x_cond(<vscale x 2 x i1> %x, <vscale x 2 x
define <vscale x 2 x i1> @select_undef_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_undef_T_F:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> poison, <vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 %evl)
@@ -852,6 +859,7 @@ define <vscale x 2 x i1> @select_undef_undef_F(<vscale x 2 x i1> %x, i32 zeroext
define <vscale x 2 x i1> @select_unknown_undef_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_undef_F:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> undef, <vscale x 2 x i1> %y, i32 %evl)
@@ -861,6 +869,7 @@ define <vscale x 2 x i1> @select_unknown_undef_F(<vscale x 2 x i1> %x, <vscale x
define <vscale x 2 x i1> @select_unknown_T_undef(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_T_undef:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> poison, i32 %evl)
@@ -870,6 +879,7 @@ define <vscale x 2 x i1> @select_unknown_T_undef(<vscale x 2 x i1> %x, <vscale x
define <vscale x 2 x i1> @select_false_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> %z, i32 zeroext %evl) {
; CHECK-LABEL: select_false_T_F:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> %y, <vscale x 2 x i1> %z, i32 %evl)
@@ -879,6 +889,7 @@ define <vscale x 2 x i1> @select_false_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i
define <vscale x 2 x i1> @select_unknown_T_T(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_T_T:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> %y, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
index 33acfb7dceb949..70d3f306542548 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
@@ -18,6 +18,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind {
; CHECK-LABEL: fixed_length:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: # kill: def $v11 killed $v10
; CHECK-NEXT: # kill: def $v9 killed $v8
@@ -36,6 +37,7 @@ entry:
define <vscale x 1 x double> @scalable(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: scalable:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: # implicit-def: $v9
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
@@ -53,6 +55,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_vlmax(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_vlmax:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -81,6 +84,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_avl_imm(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_imm:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetivli a0, 2, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -108,6 +112,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_avl_reg(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_reg:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -135,6 +140,7 @@ entry:
define <vscale x 1 x double> @intrinsic_diff_avl_reg(i64 %avl, i64 %avl2, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_diff_avl_reg:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index b0cb6bc6125ddf..ae59358505a752 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -377,6 +377,7 @@ entry:
define <vscale x 1 x double> @test19(<vscale x 1 x double> %a, double %b) nounwind {
; CHECK-LABEL: test19:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
index d3b905ef897b1b..85ffa9d5965a12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -151,6 +151,7 @@ declare <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <
define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -167,6 +168,7 @@ define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsext.vf4 v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 581cc666b6cbd5..c756cc1fa0ca80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -508,6 +508,7 @@ declare <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
@@ -524,6 +525,7 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB34_2:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfncvt.f.x.w v24, v8, v0.t
@@ -532,6 +534,7 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
@@ -550,6 +553,7 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB34_2:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -566,6 +570,7 @@ declare <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i
define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -582,6 +587,7 @@ define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index f9c24eeec31c56..90db93cc838435 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -590,25 +590,27 @@ declare <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a0, a1, a2
+; CHECK-NEXT: sltu a3, a1, a0
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a2
-; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: and a3, a3, a0
+; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v16, v16, a2, v0.t
-; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1392,6 +1394,7 @@ declare <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -1409,6 +1412,7 @@ define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 04a1b522a8a33a..4c4721c52154e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -588,25 +588,27 @@ declare <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv128i8:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a0, a1, a2
+; CHECK-NEXT: sltu a3, a1, a0
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a2
-; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: and a3, a3, a0
+; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v16, v16, a2, v0.t
-; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1390,6 +1392,7 @@ declare <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -1407,6 +1410,7 @@ define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index e62b7a00396388..184188292d39f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -157,6 +157,7 @@ declare <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64>
define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
@@ -174,6 +175,7 @@ define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -214,6 +216,7 @@ declare <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32>,
define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -232,6 +235,7 @@ define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB15_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -248,6 +252,7 @@ declare <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32>,
define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -266,6 +271,7 @@ define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB16_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -288,6 +294,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -322,6 +329,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB17_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
@@ -335,6 +343,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
@@ -344,6 +353,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB17_6:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index 2c5a01279d5d37..ea9fe87843125a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -500,6 +500,7 @@ declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
@@ -516,6 +517,7 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB34_2:
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v24, v8, v0.t
@@ -524,6 +526,7 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
@@ -542,6 +545,7 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB34_2:
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v8, v8, v0.t
@@ -558,6 +562,7 @@ declare <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i
define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -574,6 +579,7 @@ define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
index 10e655c8445409..f7cef4a8e9ca60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -151,6 +151,7 @@ declare <vscale x 32 x i32> @llvm.vp.zext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <
define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vzext_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -167,6 +168,7 @@ define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vzext.vf4 v24, v8, v0.t
>From aa51793cee717ac4e30ae18bf2afc5786f54a2c8 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Wed, 27 Nov 2024 02:37:29 -0800
Subject: [PATCH 2/7] Retrieve predecessors' VSETVLIInfo
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 4 +++
llvm/test/CodeGen/RISCV/rvv/abs-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/copyprop.mir | 1 -
llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll | 4 ---
llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll | 5 ----
llvm/test/CodeGen/RISCV/rvv/expandload.ll | 28 -------------------
.../RISCV/rvv/fixed-vectors-ceil-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-floor-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-fmaximum-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-fminimum-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-nearbyint-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-rint-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-round-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-roundeven-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-roundtozero-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-setcc-int-vp.ll | 3 --
.../RISCV/rvv/fixed-vectors-trunc-vp.ll | 6 ----
.../RISCV/rvv/fixed-vectors-vadd-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vmax-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vmaxu-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vmin-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vminu-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vpgather.ll | 1 -
.../CodeGen/RISCV/rvv/fixed-vectors-vpload.ll | 1 -
.../RISCV/rvv/fixed-vectors-vsadd-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vsaddu-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vselect-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vssub-vp.ll | 1 -
.../RISCV/rvv/fixed-vectors-vssubu-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/floor-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/rint-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/round-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 5 ----
llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll | 6 ----
llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll | 3 --
llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll | 4 ---
llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll | 3 --
llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll | 3 --
llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll | 3 --
llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll | 3 --
.../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll | 8 ------
llvm/test/CodeGen/RISCV/rvv/vpload.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll | 3 --
.../CodeGen/RISCV/rvv/vreductions-mask-vp.ll | 23 ++++++++-------
llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll | 2 --
llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll | 3 --
llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll | 1 -
llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll | 3 --
llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll | 22 +++++++--------
llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll | 22 +++++++--------
llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll | 6 ----
llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll | 3 --
llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll | 1 -
74 files changed, 35 insertions(+), 199 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 64bb732b575ab7..5433916202ec28 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1793,6 +1793,10 @@ static bool isRVVCopy(const MachineInstr &MI) {
void RISCVInsertVSETVLI::enableVTYPEBeforeMove(MachineBasicBlock &MBB) {
bool NeedVSETVL = true;
+ if (!BlockInfo[MBB.getNumber()].Pred.isUnknown() &&
+ BlockInfo[MBB.getNumber()].Pred.isValid())
+ NeedVSETVL = false;
+
for (auto &MI : MBB) {
if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags))
NeedVSETVL = false;
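A minimal sketch of what the added guard amounts to (assuming BlockInfo holds the per-block dataflow state the pass has already computed, with Pred being the VSETVLIInfo on entry to MBB):

    // After the guard, NeedVSETVL is equivalent to: the entry state of
    // this block is unknown or invalid. If a predecessor has already
    // left VL/VTYPE in a known valid state, the whole-register move
    // needs no extra vsetvli here.
    bool NeedVSETVL = BlockInfo[MBB.getNumber()].Pred.isUnknown() ||
                      !BlockInfo[MBB.getNumber()].Pred.isValid();

When the entry state is known to be valid, the vsetvli that the earlier hunks added before whole-register moves is redundant, which the test updates below reflect by dropping those "vsetvli zero, zero, e32, m1, tu, mu" lines again.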
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index c5ff5deabb6e75..685e29ef6d9179 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -591,7 +591,6 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index a41b63babd8a08..3d0a5cc77ef679 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -3122,7 +3122,6 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a3
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a3, a3, 3
@@ -3177,7 +3176,6 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 12d98b05dbb44e..19f30a7ce438aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -1610,7 +1610,6 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1650,7 +1649,6 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-ZVKB-NEXT: # %bb.1:
; CHECK-ZVKB-NEXT: mv a0, a1
; CHECK-ZVKB-NEXT: .LBB32_2:
-; CHECK-ZVKB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVKB-NEXT: vmv1r.v v0, v24
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vrev8.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index d9644b70531f39..b5cf302605f885 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -1477,7 +1477,6 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
index a4cfbe208581b7..a9da6c305aac3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
@@ -22,7 +22,6 @@
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: bgeu a0, a2, .LBB0_2
; CHECK-NEXT: .LBB0_4: # %entry
- ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vse64.v v8, (a1)
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index 9f2712b9506426..f18420d895cd97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -1260,7 +1260,6 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
@@ -1288,7 +1287,6 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
@@ -2492,7 +2490,6 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
@@ -2519,7 +2516,6 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 27c7120a35b9b3..ec2f08f6014292 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2127,7 +2127,6 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a1, a1, a3
@@ -2312,7 +2311,6 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 7be34275ce27c0..d6f4656a77c65f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2391,7 +2391,6 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
@@ -2556,7 +2555,6 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB46_2:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a1, sp, a1
@@ -2606,7 +2604,6 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
@@ -4039,7 +4036,6 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
@@ -4081,7 +4077,6 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index 07159c0c0cd8f6..d1c5df1b3efa91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -1657,7 +1657,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_34
; CHECK-RV32-NEXT: # %bb.33: # %cond.load117
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a2
; CHECK-RV32-NEXT: vsetivli zero, 31, e8, m1, tu, ma
@@ -1810,7 +1809,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_68
; CHECK-RV32-NEXT: # %bb.67: # %cond.load245
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a3
; CHECK-RV32-NEXT: li a3, 63
@@ -1965,7 +1963,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_102
; CHECK-RV32-NEXT: # %bb.101: # %cond.load373
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a2
; CHECK-RV32-NEXT: li a2, 95
@@ -2120,7 +2117,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_136
; CHECK-RV32-NEXT: # %bb.135: # %cond.load501
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a3
; CHECK-RV32-NEXT: li a3, 127
@@ -2275,7 +2271,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_170
; CHECK-RV32-NEXT: # %bb.169: # %cond.load629
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 159
@@ -2430,7 +2425,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_204
; CHECK-RV32-NEXT: # %bb.203: # %cond.load757
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 191
@@ -2585,7 +2579,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_238
; CHECK-RV32-NEXT: # %bb.237: # %cond.load885
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 223
@@ -2740,7 +2733,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a3, .LBB61_272
; CHECK-RV32-NEXT: # %bb.271: # %cond.load1013
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 255
@@ -3915,7 +3907,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: ret
; CHECK-RV32-NEXT: .LBB61_544: # %cond.load
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v8, a1
@@ -3927,7 +3918,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_2
; CHECK-RV32-NEXT: .LBB61_545: # %cond.load1
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4281,7 +4271,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_30
; CHECK-RV32-NEXT: .LBB61_572: # %cond.load121
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a3
; CHECK-RV32-NEXT: li a3, 32
@@ -4790,7 +4779,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_66
; CHECK-RV32-NEXT: .LBB61_602: # %cond.load249
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a2
; CHECK-RV32-NEXT: li a2, 64
@@ -5300,7 +5288,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_100
; CHECK-RV32-NEXT: .LBB61_632: # %cond.load377
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a3
; CHECK-RV32-NEXT: li a3, 96
@@ -5810,7 +5797,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_134
; CHECK-RV32-NEXT: .LBB61_662: # %cond.load505
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v10, a2
; CHECK-RV32-NEXT: li a2, 128
@@ -6320,7 +6306,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_168
; CHECK-RV32-NEXT: .LBB61_692: # %cond.load633
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 160
@@ -6830,7 +6815,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_202
; CHECK-RV32-NEXT: .LBB61_722: # %cond.load761
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 192
@@ -7340,7 +7324,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_236
; CHECK-RV32-NEXT: .LBB61_752: # %cond.load889
; CHECK-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a3
; CHECK-RV32-NEXT: li a3, 224
@@ -7850,7 +7833,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_270
; CHECK-RV32-NEXT: .LBB61_782: # %cond.load1017
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vmv.s.x v12, a2
; CHECK-RV32-NEXT: li a2, 256
@@ -11275,7 +11257,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a1, .LBB61_65
; CHECK-RV64-NEXT: # %bb.64: # %cond.load245
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v9, a1
; CHECK-RV64-NEXT: li a1, 63
@@ -11558,7 +11539,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a2, .LBB61_131
; CHECK-RV64-NEXT: # %bb.130: # %cond.load501
; CHECK-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v10, a2
; CHECK-RV64-NEXT: li a2, 127
@@ -11841,7 +11821,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a1, .LBB61_197
; CHECK-RV64-NEXT: # %bb.196: # %cond.load757
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a1
; CHECK-RV64-NEXT: li a1, 191
@@ -12124,7 +12103,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: bgez a2, .LBB61_263
; CHECK-RV64-NEXT: # %bb.262: # %cond.load1013
; CHECK-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v24, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a2
; CHECK-RV64-NEXT: li a2, 255
@@ -13223,7 +13201,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: ret
; CHECK-RV64-NEXT: .LBB61_527: # %cond.load
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v8, a1
@@ -13235,7 +13212,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_2
; CHECK-RV64-NEXT: .LBB61_528: # %cond.load1
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14137,7 +14113,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_63
; CHECK-RV64-NEXT: .LBB61_588: # %cond.load249
; CHECK-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v9, a2
; CHECK-RV64-NEXT: li a2, 64
@@ -15191,7 +15166,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_129
; CHECK-RV64-NEXT: .LBB61_650: # %cond.load505
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v10, a1
; CHECK-RV64-NEXT: li a1, 128
@@ -16245,7 +16219,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_195
; CHECK-RV64-NEXT: .LBB61_712: # %cond.load761
; CHECK-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a2
; CHECK-RV64-NEXT: li a2, 192
@@ -17299,7 +17272,6 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_261
; CHECK-RV64-NEXT: .LBB61_774: # %cond.load1017
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vmv.s.x v12, a1
; CHECK-RV64-NEXT: li a1, 256
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 0e4b02d1690628..bedde48358d615 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -767,7 +767,6 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index ab49c55b79d6e9..3bb11b6a5f2e02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -767,7 +767,6 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index 9f531762d256c4..435dfcebc0f9d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -648,7 +648,6 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index e10f4a88feceef..e3c80b050f1132 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -648,7 +648,6 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 268f7c4e041dbe..1ace69a5c3cccd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -592,7 +592,6 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 5104e70c8a1252..237b871648de41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -540,7 +540,6 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 2f50b768c4a656..d308023c507a8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -767,7 +767,6 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 0032dd956ed9e8..f5374a377f1dc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -767,7 +767,6 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 59bb431ad56487..b3cc29e914a3fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -767,7 +767,6 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index 7b004c67586c45..e060062b5e3828 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -623,7 +623,6 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB51_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -665,7 +664,6 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB52_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -696,7 +694,6 @@ define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB53_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index e07be9320990a9..6ae811a33b5935 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -284,7 +284,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB16_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v27
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a5)
@@ -309,7 +308,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a6, 64
; CHECK-NEXT: .LBB16_4:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v27
; CHECK-NEXT: addi a5, a1, 384
; CHECK-NEXT: li a3, 32
@@ -346,7 +344,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a6, 16
; CHECK-NEXT: .LBB16_6:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a5)
@@ -395,7 +392,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB16_10:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v25, v7, 2
@@ -412,7 +408,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.11:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: .LBB16_12:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: slli a4, a4, 4
@@ -497,7 +492,6 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.13:
; CHECK-NEXT: li a7, 16
; CHECK-NEXT: .LBB16_14:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index a5cf019db6a279..c1e91850f053fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -378,7 +378,6 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index 9d68f737946a43..ccd9f291a8659f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -282,7 +282,6 @@ define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index 1ba36b879d38d1..b2969eeed05678 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -281,7 +281,6 @@ define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index 0bb646d360556d..6464749e70181f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -282,7 +282,6 @@ define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index a1cee432cf37b8..50e95053574973 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -281,7 +281,6 @@ define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 312c3061f4a63b..0fceb00daa3ca7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2641,7 +2641,6 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB104_2:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index 17b7aeda87118b..6104115862adcd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -429,7 +429,6 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB32_6:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a1), v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 2b80c37496a3ec..44c42ad7d638ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -387,7 +387,6 @@ define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index a95226b55ab70e..cffe69d2222429 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -383,7 +383,6 @@ define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index ececb43de22d7d..0d541917e9f6c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -183,7 +183,6 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB11_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 1cb2ce508b6401..75db312b0c6e8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -400,7 +400,6 @@ define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 03691d58ca3078..03c3c93cfca54f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -395,7 +395,6 @@ define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index 3a6dc6261a70f6..18911a5f7568bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -1477,7 +1477,6 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index 5cba24e02a3ffb..752c9dcf51c4b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -1658,7 +1658,6 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB40_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index a26f14ef42dc10..58377365949fe4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -1658,7 +1658,6 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB40_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index 1b40dd2f9c8b32..a8c8ad86452d45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -1107,7 +1107,6 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 40
@@ -1267,7 +1266,6 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB47_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
index 8ddbf9de0b6e29..86c2c07704cfd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
@@ -71,7 +71,6 @@ define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB4_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
index 645af077e1a860..4650cea23dd00f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
@@ -133,7 +133,6 @@ define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x
; RV64-i64-NEXT: # %bb.1:
; RV64-i64-NEXT: mv a0, a1
; RV64-i64-NEXT: .LBB4_2:
-; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-i64-NEXT: vmv1r.v v0, v24
; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-i64-NEXT: vfwcvt.x.f.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 3d1696c7c6afa9..4474d74007ed30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -1434,7 +1434,6 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index 85113e77e50304..72cc477a261d33 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -1351,7 +1351,6 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index b8a1429191217b..441634e934b087 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -1477,7 +1477,6 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 6e64c381710487..786e444dafe3c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -1477,7 +1477,6 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index acbb1abceffb60..9a3c7aa46f3306 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -1477,7 +1477,6 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 680f4c6e34b30e..da3c194652df76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -1543,7 +1543,6 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a6, a4
; CHECK-NEXT: .LBB85_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a7, a0, 3
@@ -1616,7 +1615,6 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a4
; CHECK-NEXT: .LBB85_6:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli a4, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16
@@ -3751,7 +3749,6 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a2, a3
; ZVFH-NEXT: .LBB171_2:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -3856,7 +3853,6 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a6, a4
; ZVFHMIN-NEXT: .LBB171_2:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v25
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a7, a0, 3
@@ -3929,7 +3925,6 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: # %bb.5:
; ZVFHMIN-NEXT: mv a2, a4
; ZVFHMIN-NEXT: .LBB171_6:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 09cc0761729b69..70347754f39c3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1118,7 +1118,6 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB96_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -1161,7 +1160,6 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB97_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -1193,7 +1191,6 @@ define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB98_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -2278,7 +2275,6 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB189_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -2324,7 +2320,6 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB190_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
@@ -2359,7 +2354,6 @@ define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB191_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index dfa1712dee10cf..2ccc4b723b170f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -581,7 +581,6 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
@@ -1362,7 +1361,6 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
@@ -1420,7 +1418,6 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB120_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index 99dd5331c78404..9b9bb20cb54981 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -478,7 +478,6 @@ define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index c082497f8115c9..b134293f2f6c10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -3749,7 +3749,6 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB128_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
@@ -9029,7 +9028,6 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, half %
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a3, a2
; ZVFHMIN-NEXT: .LBB282_2:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: addi a4, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 3d6d33f06c17bc..17ce2a00de04a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1187,7 +1187,6 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index cdddc44d92ab4a..5e9fec14b759e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -466,7 +466,6 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
index 27dd928107e9c9..32a9b47e99ec1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -113,7 +113,6 @@ define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index a9c0d7f4072396..3a855326692d7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -525,7 +525,6 @@ define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v8, v0.t
@@ -557,7 +556,6 @@ define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index be75707449ac10..96ab124a144514 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -525,7 +525,6 @@ define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v24, v8, v0.t
@@ -557,7 +556,6 @@ define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index d2dc927182cc86..1d56c5db968fad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -122,7 +122,6 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
@@ -184,7 +183,6 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB8_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
@@ -198,7 +196,6 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
@@ -208,7 +205,6 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB8_6:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index 69951bed12a977..3e4de295add01b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -767,7 +767,6 @@ define <vscale x 16 x double> @vfsqrt_vv_nxv16f64(<vscale x 16 x double> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index 0fbf710e0d2c6a..bba24dd3b09515 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -428,7 +428,6 @@ define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
@@ -993,7 +992,6 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
@@ -1055,7 +1053,6 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index 0b7ffb0cc48039..61b416f9f1b429 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -426,7 +426,6 @@ define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
@@ -991,7 +990,6 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
@@ -1053,7 +1051,6 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index f797dacc25a3d7..3c7b03469e1165 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -428,7 +428,6 @@ define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
@@ -993,7 +992,6 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
@@ -1055,7 +1053,6 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 7fd4125cff843e..4da5a7e476870d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -426,7 +426,6 @@ define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
@@ -991,7 +990,6 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
@@ -1053,7 +1051,6 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index b13f146134ecb8..f8294155e82ad3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -2475,7 +2475,6 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB111_2:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
@@ -2500,7 +2499,6 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB111_2:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
@@ -2531,7 +2529,6 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB112_2:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2561,7 +2558,6 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB112_2:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
@@ -2593,7 +2589,6 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB113_2:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2623,7 +2618,6 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB113_2:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
@@ -2656,7 +2650,6 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB114_2:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2683,7 +2676,6 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB114_2:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v24, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index c0848fb25a6a9e..1dbc115a155aa2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -540,7 +540,6 @@ define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
@@ -602,7 +601,6 @@ define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB45_6:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index df238f2f01b5a9..ab539a03ea18c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -383,7 +383,6 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
@@ -419,7 +418,6 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
@@ -449,7 +447,6 @@ define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB30_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vim v8, v8, 2, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 31d76699b17d0c..31766803364861 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -376,27 +376,26 @@ define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, <vscale x 128 x i1> %v, <
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: sub a2, a1, a3
-; CHECK-NEXT: sltu a4, a1, a2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a3, a1, a2
+; CHECK-NEXT: sltu a4, a1, a3
; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a2, a4, a2
+; CHECK-NEXT: and a3, a4, a3
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vcpop.m a2, v8, v0.t
-; CHECK-NEXT: snez a2, a2
-; CHECK-NEXT: bltu a1, a3, .LBB22_2
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; CHECK-NEXT: vcpop.m a3, v8, v0.t
+; CHECK-NEXT: snez a3, a3
+; CHECK-NEXT: bltu a1, a2, .LBB22_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a3
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v11, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
-; CHECK-NEXT: or a0, a2, a0
+; CHECK-NEXT: or a0, a3, a0
; CHECK-NEXT: ret
%r = call i1 @llvm.vp.reduce.or.nxv128i1(i1 %s, <vscale x 128 x i1> %v, <vscale x 128 x i1> %m, i32 %evl)
ret i1 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index b483748064cdae..cfe5aee6edb9ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -588,7 +588,6 @@ define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
@@ -1369,7 +1368,6 @@ define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index 3bb4ae1ec1e723..f45154222a9a78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -587,7 +587,6 @@ define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
@@ -1368,7 +1367,6 @@ define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 2c1d6691fb83c1..6de6d56badbabd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -385,7 +385,6 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -446,7 +445,6 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -736,7 +734,6 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB48_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
index 85ffa9d5965a12..7add176edf4bdd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -168,7 +168,6 @@ define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsext.vf4 v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index c756cc1fa0ca80..f75c22ee9d07a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -525,7 +525,6 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB34_2:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfncvt.f.x.w v24, v8, v0.t
@@ -553,7 +552,6 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB34_2:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -587,7 +585,6 @@ define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index 90db93cc838435..b31cba065b0b98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -594,23 +594,22 @@ define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub a0, a1, a2
-; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a0
-; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: vssub.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1412,7 +1411,6 @@ define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 4c4721c52154e5..388614739dd82e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -592,23 +592,22 @@ define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub a0, a1, a2
-; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a0
-; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: vssubu.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1410,7 +1409,6 @@ define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index 184188292d39f7..915d7c99d7c667 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -175,7 +175,6 @@ define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -235,7 +234,6 @@ define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB15_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -271,7 +269,6 @@ define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB16_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
@@ -329,7 +326,6 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB17_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
@@ -343,7 +339,6 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
@@ -353,7 +348,6 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB17_6:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index ea9fe87843125a..5cee7f981c540d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -517,7 +517,6 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB34_2:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfncvt.f.xu.w v24, v8, v0.t
@@ -545,7 +544,6 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB34_2:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v8, v8, v0.t
@@ -579,7 +577,6 @@ define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB35_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
index f7cef4a8e9ca60..6f74d752bbabec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -168,7 +168,6 @@ define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vzext.vf4 v24, v8, v0.t
From fa361d5286103e1c9569d8392d01c7c77aa4f512 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Sun, 1 Dec 2024 21:33:34 -0800
Subject: [PATCH 3/7] enableVTYPEBeforeMove -> insertVSETVLIBeforeCopy
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 5433916202ec28..d9a5fedb02f603 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -922,7 +922,7 @@ class RISCVInsertVSETVLI : public MachineFunctionPass {
VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) const;
VSETVLIInfo computeInfoForInstr(const MachineInstr &MI) const;
void forwardVSETVLIAVL(VSETVLIInfo &Info) const;
- void enableVTYPEBeforeMove(MachineBasicBlock &MBB);
+ void insertVSETVLIBeforeCopy(MachineBasicBlock &MBB);
};
} // end anonymous namespace
@@ -1790,7 +1790,7 @@ static bool isRVVCopy(const MachineInstr &MI) {
return false;
}
-void RISCVInsertVSETVLI::enableVTYPEBeforeMove(MachineBasicBlock &MBB) {
+void RISCVInsertVSETVLI::insertVSETVLIBeforeCopy(MachineBasicBlock &MBB) {
bool NeedVSETVL = true;
if (!BlockInfo[MBB.getNumber()].Pred.isUnknown() &&
@@ -1888,7 +1888,7 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
insertReadVL(MBB);
for (MachineBasicBlock &MBB : MF)
- enableVTYPEBeforeMove(MBB);
+ insertVSETVLIBeforeCopy(MBB);
BlockInfo.clear();
return HaveVectorOp;
From 88facd48202d23b2df0f4e636ff8646a1d5ac8c4 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Sun, 1 Dec 2024 21:36:26 -0800
Subject: [PATCH 4/7] Use vsetivli x0, 0, *
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index d9a5fedb02f603..e632691c61693c 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1806,12 +1806,11 @@ void RISCVInsertVSETVLI::insertVSETVLIBeforeCopy(MachineBasicBlock &MBB) {
if (NeedVSETVL && isRVVCopy(MI)) {
auto VSETVL0MI =
- BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(RISCV::PseudoVSETVLIX0))
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(RISCV::PseudoVSETIVLI))
.addReg(RISCV::X0, RegState::Define | RegState::Dead)
- .addReg(RISCV::X0, RegState::Kill)
+ .addImm(0)
.addImm(RISCVVType::encodeVTYPE(RISCVII::VLMUL::LMUL_1, 32, false,
- false))
- .addReg(RISCV::VL, RegState::Implicit);
+ false));
if (LIS)
LIS->InsertMachineInstrInMaps(*VSETVL0MI);
NeedVSETVL = false;
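[Editor's note, not part of the patch: a minimal before/after sketch of the instruction this change emits ahead of a whole-register RVV copy, inferred from the removed BuildMI operands and the test updates below; register choices are illustrative only.]
	; before: vsetvli  zero, zero, e32, m1, tu, mu   ; x0,x0 form, carried an implicit VL use
	; after:  vsetivli zero, 0, e32, m1, tu, mu      ; immediate AVL of 0, no VL operand needed
	vsetivli zero, 0, e32, m1, tu, mu                ; make VTYPE valid before the copy
	vmv1r.v  v0, v24                                 ; whole-register move; the real vsetvli for the masked op follows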
From 75300651f01d8fc2efd184a3550432acbbe5cfd0 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Sun, 1 Dec 2024 21:36:56 -0800
Subject: [PATCH 5/7] insertVSETVLIBeforeCopy -> insertVSETIVLIBeforeCopy
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index e632691c61693c..c05e84e48fd169 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -922,7 +922,7 @@ class RISCVInsertVSETVLI : public MachineFunctionPass {
VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) const;
VSETVLIInfo computeInfoForInstr(const MachineInstr &MI) const;
void forwardVSETVLIAVL(VSETVLIInfo &Info) const;
- void insertVSETVLIBeforeCopy(MachineBasicBlock &MBB);
+ void insertVSETIVLIBeforeCopy(MachineBasicBlock &MBB);
};
} // end anonymous namespace
@@ -1790,7 +1790,7 @@ static bool isRVVCopy(const MachineInstr &MI) {
return false;
}
-void RISCVInsertVSETVLI::insertVSETVLIBeforeCopy(MachineBasicBlock &MBB) {
+void RISCVInsertVSETVLI::insertVSETIVLIBeforeCopy(MachineBasicBlock &MBB) {
bool NeedVSETVL = true;
if (!BlockInfo[MBB.getNumber()].Pred.isUnknown() &&
@@ -1887,7 +1887,7 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
insertReadVL(MBB);
for (MachineBasicBlock &MBB : MF)
- insertVSETVLIBeforeCopy(MBB);
+ insertVSETIVLIBeforeCopy(MBB);
BlockInfo.clear();
return HaveVectorOp;
From 9740196346add752e3f4f00984d8fc482f22e2f1 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Sun, 1 Dec 2024 21:43:03 -0800
Subject: [PATCH 6/7] Update testcases
---
.../CodeGen/RISCV/inline-asm-v-constraint.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/abs-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll | 4 +-
.../CodeGen/RISCV/rvv/calling-conv-fastcc.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/calling-conv.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/compressstore.ll | 4 +-
.../RISCV/rvv/constant-folding-crash.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll | 10 +-
llvm/test/CodeGen/RISCV/rvv/expandload.ll | 972 +++++++++---------
.../CodeGen/RISCV/rvv/extract-subvector.ll | 38 +-
.../rvv/fixed-vector-i8-index-cornercase.ll | 4 +-
.../RISCV/rvv/fixed-vectors-bitreverse-vp.ll | 4 +-
.../rvv/fixed-vectors-calling-conv-fastcc.ll | 2 +-
.../RISCV/rvv/fixed-vectors-calling-conv.ll | 2 +-
.../RISCV/rvv/fixed-vectors-ceil-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-ctpop-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-floor-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-fmaximum-vp.ll | 34 +-
.../RISCV/rvv/fixed-vectors-fminimum-vp.ll | 34 +-
.../RISCV/rvv/fixed-vectors-fp-interleave.ll | 4 +-
.../RISCV/rvv/fixed-vectors-fptrunc-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll | 2 +-
.../rvv/fixed-vectors-insert-subvector.ll | 4 +-
.../RISCV/rvv/fixed-vectors-int-interleave.ll | 4 +-
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 98 +-
.../rvv/fixed-vectors-masked-load-int.ll | 2 +-
.../RISCV/rvv/fixed-vectors-nearbyint-vp.ll | 16 +-
.../rvv/fixed-vectors-reduction-mask-vp.ll | 62 +-
.../RISCV/rvv/fixed-vectors-rint-vp.ll | 16 +-
.../RISCV/rvv/fixed-vectors-round-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-roundeven-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-roundtozero-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-setcc-int-vp.ll | 6 +-
.../RISCV/rvv/fixed-vectors-shuffle-concat.ll | 18 +-
.../rvv/fixed-vectors-shuffle-exact-vlen.ll | 4 +-
.../rvv/fixed-vectors-shuffle-reverse.ll | 22 +-
.../rvv/fixed-vectors-shuffle-vslide1up.ll | 2 +-
.../fixed-vectors-strided-load-store-asm.ll | 2 +-
.../RISCV/rvv/fixed-vectors-strided-vpload.ll | 6 +-
.../RISCV/rvv/fixed-vectors-trunc-vp.ll | 6 +-
.../RISCV/rvv/fixed-vectors-unaligned.ll | 8 +-
.../RISCV/rvv/fixed-vectors-vadd-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vmax-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vmaxu-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vmin-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vminu-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vpgather.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vpload.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vpmerge.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vsadd-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vsaddu-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vselect-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vssub-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vssubu-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/floor-vp.ll | 38 +-
.../test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll | 50 +-
.../test/CodeGen/RISCV/rvv/fminimum-sdnode.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll | 50 +-
.../RISCV/rvv/fold-scalar-load-crash.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll | 10 +-
llvm/test/CodeGen/RISCV/rvv/inline-asm.ll | 14 +-
.../CodeGen/RISCV/rvv/insert-subvector.ll | 44 +-
llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/masked-tama.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll | 32 +-
.../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll | 2 +-
.../RISCV/rvv/named-vector-shuffle-reverse.ll | 26 +-
llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/pr88576.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/rint-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/round-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll | 38 +-
.../RISCV/rvv/rv32-spill-vector-csr.ll | 2 +-
.../RISCV/rvv/rv64-spill-vector-csr.ll | 2 +-
.../test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll | 2 +-
.../RISCV/rvv/rvv-peephole-vmerge-vops.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll | 12 +-
.../CodeGen/RISCV/rvv/sink-splat-operands.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll | 8 +-
.../test/CodeGen/RISCV/rvv/strided-vpstore.ll | 4 +-
.../RISCV/rvv/undef-earlyclobber-chain.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vcpop.ll | 14 +-
.../RISCV/rvv/vector-deinterleave-fixed.ll | 2 +-
.../CodeGen/RISCV/rvv/vector-deinterleave.ll | 16 +-
.../RISCV/rvv/vector-interleave-fixed.ll | 8 +-
.../RISCV/rvv/vector-interleave-store.ll | 2 +-
.../CodeGen/RISCV/rvv/vector-interleave.ll | 30 +-
.../RISCV/rvv/vector-reassociations.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vector-splice.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vfirst.ll | 14 +-
llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll | 28 +-
.../RISCV/rvv/vfmadd-constrained-sdnode.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll | 2 +-
.../RISCV/rvv/vfnmadd-constrained-sdnode.ll | 2 +-
.../RISCV/rvv/vfnmsub-constrained-sdnode.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vl-opt.ll | 4 +-
.../CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll | 330 +++---
.../CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll | 330 +++---
llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vmfeq.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfge.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfgt.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfle.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmflt.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfne.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vmsbf.ll | 14 +-
llvm/test/CodeGen/RISCV/rvv/vmseq.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsge.ll | 74 +-
llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsgt.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsif.ll | 14 +-
llvm/test/CodeGen/RISCV/rvv/vmsle.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsleu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmslt.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsltu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsne.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsof.ll | 14 +-
.../CodeGen/RISCV/rvv/vmv.v.v-peephole.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vp-select.ll | 2 +-
.../RISCV/rvv/vp-splice-mask-fixed-vectors.ll | 24 +-
.../RISCV/rvv/vp-splice-mask-vectors.ll | 42 +-
.../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vpload.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vpstore.ll | 4 +-
.../CodeGen/RISCV/rvv/vreductions-mask-vp.ll | 74 +-
.../RISCV/rvv/vrgatherei16-subreg-liveness.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vselect-int.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll | 16 +-
.../CodeGen/RISCV/rvv/vsetvli-insert-O0.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll | 2 +-
172 files changed, 2127 insertions(+), 2127 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
index 6b566e2df0d798..77ffdd9ae934a6 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
@@ -45,7 +45,7 @@ define <vscale x 1 x i8> @constraint_vd(<vscale x 1 x i8> %0, <vscale x 1 x i8>
define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1) nounwind {
; RV32I-LABEL: constraint_vm:
; RV32I: # %bb.0:
-; RV32I-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32I-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32I-NEXT: vmv1r.v v9, v0
; RV32I-NEXT: vmv1r.v v0, v8
; RV32I-NEXT: #APP
@@ -55,7 +55,7 @@ define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1>
;
; RV64I-LABEL: constraint_vm:
; RV64I: # %bb.0:
-; RV64I-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64I-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64I-NEXT: vmv1r.v v9, v0
; RV64I-NEXT: vmv1r.v v0, v8
; RV64I-NEXT: #APP
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 685e29ef6d9179..d91659d9e1c7c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -567,7 +567,7 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 3d0a5cc77ef679..2836459ec5e0a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -3075,7 +3075,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -3159,7 +3159,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 19f30a7ce438aa..113ae37b08ae65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -1584,7 +1584,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1632,7 +1632,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
;
; CHECK-ZVKB-LABEL: vp_bswap_nxv64i16:
; CHECK-ZVKB: # %bb.0:
-; CHECK-ZVKB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-ZVKB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVKB-NEXT: vmv1r.v v24, v0
; CHECK-ZVKB-NEXT: csrr a1, vlenb
; CHECK-ZVKB-NEXT: srli a2, a1, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index e85a7af56cc497..5c533f042b4e53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -336,7 +336,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: li a3, 2
; RV32-NEXT: vs8r.v v16, (a1)
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v8, v0
; RV32-NEXT: vmv8r.v v16, v24
; RV32-NEXT: call ext2
@@ -375,7 +375,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV64-NEXT: add a1, a3, a1
; RV64-NEXT: li a3, 2
; RV64-NEXT: vs8r.v v16, (a1)
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v8, v0
; RV64-NEXT: vmv8r.v v16, v24
; RV64-NEXT: call ext2
@@ -453,7 +453,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 128
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v16, v0
; RV32-NEXT: call ext3
; RV32-NEXT: addi sp, s0, -144
@@ -526,7 +526,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 128
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v16, v0
; RV64-NEXT: call ext3
; RV64-NEXT: addi sp, s0, -144
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 05873a4e83aa29..068fdad8a4ab3e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -103,7 +103,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call callee_tuple_return
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
@@ -120,7 +120,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call callee_tuple_return
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
@@ -146,7 +146,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
@@ -163,7 +163,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index b5cf302605f885..e0522b7eb1e67a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.ceil.nxv4bf16(<vscale x 4 x bfloat>, <vsc
define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.ceil.nxv8bf16(<vscale x 8 x bfloat>, <vsc
define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.ceil.nxv16bf16(<vscale x 16 x bfloat>, <
define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va,
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.ceil.nxv4f32(<vscale x 4 x float>, <vscale
define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.ceil.nxv8f32(<vscale x 8 x float>, <vscale
define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.ceil.nxv16f32(<vscale x 16 x float>, <vsc
define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index d1679b6e2d7fdf..20b12df953a18c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -197,7 +197,7 @@ entry:
define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
; RV64-LABEL: test_compresstore_v256i8:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v7, v8
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -231,7 +231,7 @@ define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: sub sp, sp, a2
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v16
; RV32-NEXT: li a2, 128
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
index 279eccce002ea8..58236dc1439273 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -18,7 +18,7 @@
define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lanes.b, <4 x i1> %sel) {
; RV32-LABEL: constant_folding_crash:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: lw a0, 8(a0)
; RV32-NEXT: andi a0, a0, 1
@@ -44,7 +44,7 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
;
; RV64-LABEL: constant_folding_crash:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: ld a0, 8(a0)
; RV64-NEXT: andi a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index f18420d895cd97..4c818a515f96e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -1235,7 +1235,7 @@ declare <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64>, i1 immar
define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctlz_nxv16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: fsrmi a4, 1
@@ -1271,7 +1271,7 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_ctlz_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -2467,7 +2467,7 @@ define <vscale x 8 x i64> @vp_ctlz_zero_undef_nxv8i64_unmasked(<vscale x 8 x i64
define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: fsrmi a3, 1
@@ -2500,7 +2500,7 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
;
; CHECK-ZVBB-LABEL: vp_ctlz_zero_undef_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index ec2f08f6014292..efe1b4293145fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2022,7 +2022,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 24
@@ -2295,7 +2295,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index d6f4656a77c65f..96d3e446387fe7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2246,7 +2246,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 5
@@ -2500,7 +2500,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
@@ -2588,7 +2588,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_cttz_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -4005,7 +4005,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -4061,7 +4061,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
;
; CHECK-ZVBB-LABEL: vp_cttz_zero_undef_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index d1c5df1b3efa91..11cf9fe86a9488 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -227,7 +227,7 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8
; CHECK-RV32-NEXT: add a2, sp, a2
; CHECK-RV32-NEXT: addi a2, a2, 16
; CHECK-RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v7, v8
; CHECK-RV32-NEXT: li a2, 128
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -339,7 +339,7 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8
; CHECK-RV64-NEXT: add a2, sp, a2
; CHECK-RV64-NEXT: addi a2, a2, 16
; CHECK-RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v7, v8
; CHECK-RV64-NEXT: li a2, 128
; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -1628,7 +1628,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a1, .LBB61_30
; CHECK-RV32-NEXT: .LBB61_29: # %cond.load109
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 29, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -1642,7 +1642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_32
; CHECK-RV32-NEXT: # %bb.31: # %cond.load113
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 30, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a2
@@ -1791,7 +1791,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_65: # %cond.load241
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -1945,7 +1945,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_99: # %cond.load369
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -2099,7 +2099,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_133: # %cond.load497
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -2253,7 +2253,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_167: # %cond.load625
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -2407,7 +2407,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_201: # %cond.load753
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -2561,7 +2561,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_235: # %cond.load881
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -2715,7 +2715,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_269: # %cond.load1009
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -3931,7 +3931,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_3
; CHECK-RV32-NEXT: .LBB61_546: # %cond.load5
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3944,7 +3944,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_4
; CHECK-RV32-NEXT: .LBB61_547: # %cond.load9
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3957,7 +3957,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_5
; CHECK-RV32-NEXT: .LBB61_548: # %cond.load13
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 5, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3970,7 +3970,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_6
; CHECK-RV32-NEXT: .LBB61_549: # %cond.load17
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3983,7 +3983,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_7
; CHECK-RV32-NEXT: .LBB61_550: # %cond.load21
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3996,7 +3996,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_8
; CHECK-RV32-NEXT: .LBB61_551: # %cond.load25
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4009,7 +4009,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_9
; CHECK-RV32-NEXT: .LBB61_552: # %cond.load29
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 9, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4022,7 +4022,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_10
; CHECK-RV32-NEXT: .LBB61_553: # %cond.load33
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4035,7 +4035,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_11
; CHECK-RV32-NEXT: .LBB61_554: # %cond.load37
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4048,7 +4048,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_12
; CHECK-RV32-NEXT: .LBB61_555: # %cond.load41
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4061,7 +4061,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_13
; CHECK-RV32-NEXT: .LBB61_556: # %cond.load45
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 13, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4074,7 +4074,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_14
; CHECK-RV32-NEXT: .LBB61_557: # %cond.load49
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4087,7 +4087,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_15
; CHECK-RV32-NEXT: .LBB61_558: # %cond.load53
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4100,7 +4100,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_16
; CHECK-RV32-NEXT: .LBB61_559: # %cond.load57
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4113,7 +4113,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_17
; CHECK-RV32-NEXT: .LBB61_560: # %cond.load61
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 17, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4126,7 +4126,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_18
; CHECK-RV32-NEXT: .LBB61_561: # %cond.load65
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 18, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4139,7 +4139,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_19
; CHECK-RV32-NEXT: .LBB61_562: # %cond.load69
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 19, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4152,7 +4152,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_20
; CHECK-RV32-NEXT: .LBB61_563: # %cond.load73
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 20, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4165,7 +4165,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_21
; CHECK-RV32-NEXT: .LBB61_564: # %cond.load77
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 21, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4178,7 +4178,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_22
; CHECK-RV32-NEXT: .LBB61_565: # %cond.load81
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 22, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4191,7 +4191,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_23
; CHECK-RV32-NEXT: .LBB61_566: # %cond.load85
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 23, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4204,7 +4204,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_24
; CHECK-RV32-NEXT: .LBB61_567: # %cond.load89
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 24, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4217,7 +4217,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_25
; CHECK-RV32-NEXT: .LBB61_568: # %cond.load93
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 25, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4230,7 +4230,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_26
; CHECK-RV32-NEXT: .LBB61_569: # %cond.load97
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 26, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4243,7 +4243,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_27
; CHECK-RV32-NEXT: .LBB61_570: # %cond.load101
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 27, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4256,7 +4256,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_28
; CHECK-RV32-NEXT: .LBB61_571: # %cond.load105
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 28, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4285,7 +4285,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_573: # %cond.load125
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4302,7 +4302,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_574: # %cond.load129
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4319,7 +4319,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_575: # %cond.load133
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4336,7 +4336,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_576: # %cond.load137
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4353,7 +4353,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_577: # %cond.load141
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4370,7 +4370,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_578: # %cond.load145
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4387,7 +4387,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_579: # %cond.load149
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4404,7 +4404,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_580: # %cond.load153
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4421,7 +4421,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_581: # %cond.load157
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4438,7 +4438,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_582: # %cond.load161
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4455,7 +4455,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_583: # %cond.load165
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4472,7 +4472,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_584: # %cond.load169
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4489,7 +4489,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_585: # %cond.load173
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4506,7 +4506,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_586: # %cond.load177
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4523,7 +4523,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_587: # %cond.load181
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4540,7 +4540,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_588: # %cond.load185
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4557,7 +4557,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_589: # %cond.load189
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4574,7 +4574,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_590: # %cond.load193
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4591,7 +4591,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_591: # %cond.load197
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4608,7 +4608,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_592: # %cond.load201
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4625,7 +4625,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_593: # %cond.load205
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4642,7 +4642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_594: # %cond.load209
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4659,7 +4659,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_595: # %cond.load213
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4676,7 +4676,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_596: # %cond.load217
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4693,7 +4693,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_597: # %cond.load221
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4710,7 +4710,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_598: # %cond.load225
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4727,7 +4727,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_599: # %cond.load229
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4744,7 +4744,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_600: # %cond.load233
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4761,7 +4761,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_601: # %cond.load237
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4794,7 +4794,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_603: # %cond.load253
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4811,7 +4811,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_604: # %cond.load257
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4828,7 +4828,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_605: # %cond.load261
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4845,7 +4845,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_606: # %cond.load265
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4862,7 +4862,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_607: # %cond.load269
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4879,7 +4879,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_608: # %cond.load273
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4896,7 +4896,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_609: # %cond.load277
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4913,7 +4913,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_610: # %cond.load281
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4930,7 +4930,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_611: # %cond.load285
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4947,7 +4947,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_612: # %cond.load289
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4964,7 +4964,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_613: # %cond.load293
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4981,7 +4981,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_614: # %cond.load297
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4998,7 +4998,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_615: # %cond.load301
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5015,7 +5015,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_616: # %cond.load305
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5032,7 +5032,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_617: # %cond.load309
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5049,7 +5049,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_618: # %cond.load313
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5066,7 +5066,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_619: # %cond.load317
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5083,7 +5083,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_620: # %cond.load321
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5100,7 +5100,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_621: # %cond.load325
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5117,7 +5117,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_622: # %cond.load329
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5134,7 +5134,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_623: # %cond.load333
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5151,7 +5151,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_624: # %cond.load337
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5168,7 +5168,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_625: # %cond.load341
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5185,7 +5185,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_626: # %cond.load345
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5202,7 +5202,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_627: # %cond.load349
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5219,7 +5219,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_628: # %cond.load353
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5236,7 +5236,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_629: # %cond.load357
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5253,7 +5253,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_630: # %cond.load361
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5270,7 +5270,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_631: # %cond.load365
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5303,7 +5303,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_633: # %cond.load381
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5320,7 +5320,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_634: # %cond.load385
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5337,7 +5337,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_635: # %cond.load389
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5354,7 +5354,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_636: # %cond.load393
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5371,7 +5371,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_637: # %cond.load397
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5388,7 +5388,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_638: # %cond.load401
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5405,7 +5405,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_639: # %cond.load405
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5422,7 +5422,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_640: # %cond.load409
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5439,7 +5439,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_641: # %cond.load413
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5456,7 +5456,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_642: # %cond.load417
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5473,7 +5473,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_643: # %cond.load421
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5490,7 +5490,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_644: # %cond.load425
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5507,7 +5507,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_645: # %cond.load429
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5524,7 +5524,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_646: # %cond.load433
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5541,7 +5541,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_647: # %cond.load437
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5558,7 +5558,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_648: # %cond.load441
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5575,7 +5575,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_649: # %cond.load445
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5592,7 +5592,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_650: # %cond.load449
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5609,7 +5609,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_651: # %cond.load453
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5626,7 +5626,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_652: # %cond.load457
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5643,7 +5643,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_653: # %cond.load461
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5660,7 +5660,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_654: # %cond.load465
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5677,7 +5677,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_655: # %cond.load469
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5694,7 +5694,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_656: # %cond.load473
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5711,7 +5711,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_657: # %cond.load477
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5728,7 +5728,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_658: # %cond.load481
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5745,7 +5745,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_659: # %cond.load485
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5762,7 +5762,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_660: # %cond.load489
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5779,7 +5779,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_661: # %cond.load493
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5812,7 +5812,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_663: # %cond.load509
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5829,7 +5829,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_664: # %cond.load513
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5846,7 +5846,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_665: # %cond.load517
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5863,7 +5863,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_666: # %cond.load521
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5880,7 +5880,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_667: # %cond.load525
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5897,7 +5897,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_668: # %cond.load529
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5914,7 +5914,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_669: # %cond.load533
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5931,7 +5931,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_670: # %cond.load537
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5948,7 +5948,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_671: # %cond.load541
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5965,7 +5965,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_672: # %cond.load545
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5982,7 +5982,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_673: # %cond.load549
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5999,7 +5999,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_674: # %cond.load553
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6016,7 +6016,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_675: # %cond.load557
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6033,7 +6033,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_676: # %cond.load561
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6050,7 +6050,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_677: # %cond.load565
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6067,7 +6067,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_678: # %cond.load569
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6084,7 +6084,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_679: # %cond.load573
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6101,7 +6101,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_680: # %cond.load577
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6118,7 +6118,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_681: # %cond.load581
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6135,7 +6135,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_682: # %cond.load585
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6152,7 +6152,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_683: # %cond.load589
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6169,7 +6169,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_684: # %cond.load593
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6186,7 +6186,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_685: # %cond.load597
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6203,7 +6203,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_686: # %cond.load601
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6220,7 +6220,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_687: # %cond.load605
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6237,7 +6237,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_688: # %cond.load609
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6254,7 +6254,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_689: # %cond.load613
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6271,7 +6271,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_690: # %cond.load617
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6288,7 +6288,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_691: # %cond.load621
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6321,7 +6321,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_693: # %cond.load637
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6338,7 +6338,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_694: # %cond.load641
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6355,7 +6355,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_695: # %cond.load645
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6372,7 +6372,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_696: # %cond.load649
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6389,7 +6389,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_697: # %cond.load653
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6406,7 +6406,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_698: # %cond.load657
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6423,7 +6423,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_699: # %cond.load661
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6440,7 +6440,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_700: # %cond.load665
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6457,7 +6457,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_701: # %cond.load669
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6474,7 +6474,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_702: # %cond.load673
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6491,7 +6491,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_703: # %cond.load677
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6508,7 +6508,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_704: # %cond.load681
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6525,7 +6525,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_705: # %cond.load685
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6542,7 +6542,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_706: # %cond.load689
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6559,7 +6559,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_707: # %cond.load693
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6576,7 +6576,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_708: # %cond.load697
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6593,7 +6593,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_709: # %cond.load701
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6610,7 +6610,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_710: # %cond.load705
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6627,7 +6627,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_711: # %cond.load709
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6644,7 +6644,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_712: # %cond.load713
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6661,7 +6661,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_713: # %cond.load717
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6678,7 +6678,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_714: # %cond.load721
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6695,7 +6695,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_715: # %cond.load725
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6712,7 +6712,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_716: # %cond.load729
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6729,7 +6729,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_717: # %cond.load733
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6746,7 +6746,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_718: # %cond.load737
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6763,7 +6763,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_719: # %cond.load741
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6780,7 +6780,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_720: # %cond.load745
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6797,7 +6797,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_721: # %cond.load749
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6830,7 +6830,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_723: # %cond.load765
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6847,7 +6847,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_724: # %cond.load769
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6864,7 +6864,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_725: # %cond.load773
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6881,7 +6881,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_726: # %cond.load777
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6898,7 +6898,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_727: # %cond.load781
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6915,7 +6915,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_728: # %cond.load785
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6932,7 +6932,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_729: # %cond.load789
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6949,7 +6949,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_730: # %cond.load793
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6966,7 +6966,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_731: # %cond.load797
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6983,7 +6983,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_732: # %cond.load801
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7000,7 +7000,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_733: # %cond.load805
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7017,7 +7017,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_734: # %cond.load809
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7034,7 +7034,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_735: # %cond.load813
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7051,7 +7051,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_736: # %cond.load817
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7068,7 +7068,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_737: # %cond.load821
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7085,7 +7085,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_738: # %cond.load825
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7102,7 +7102,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_739: # %cond.load829
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7119,7 +7119,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_740: # %cond.load833
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7136,7 +7136,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_741: # %cond.load837
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7153,7 +7153,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_742: # %cond.load841
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7170,7 +7170,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_743: # %cond.load845
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7187,7 +7187,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_744: # %cond.load849
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7204,7 +7204,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_745: # %cond.load853
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7221,7 +7221,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_746: # %cond.load857
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7238,7 +7238,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_747: # %cond.load861
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7255,7 +7255,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_748: # %cond.load865
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7272,7 +7272,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_749: # %cond.load869
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7289,7 +7289,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_750: # %cond.load873
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7306,7 +7306,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_751: # %cond.load877
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7339,7 +7339,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_753: # %cond.load893
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7356,7 +7356,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_754: # %cond.load897
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7373,7 +7373,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_755: # %cond.load901
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7390,7 +7390,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_756: # %cond.load905
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7407,7 +7407,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_757: # %cond.load909
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7424,7 +7424,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_758: # %cond.load913
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7441,7 +7441,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_759: # %cond.load917
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7458,7 +7458,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_760: # %cond.load921
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7475,7 +7475,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_761: # %cond.load925
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7492,7 +7492,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_762: # %cond.load929
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7509,7 +7509,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_763: # %cond.load933
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7526,7 +7526,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_764: # %cond.load937
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7543,7 +7543,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_765: # %cond.load941
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7560,7 +7560,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_766: # %cond.load945
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7577,7 +7577,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_767: # %cond.load949
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7594,7 +7594,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_768: # %cond.load953
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7611,7 +7611,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_769: # %cond.load957
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7628,7 +7628,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_770: # %cond.load961
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7645,7 +7645,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_771: # %cond.load965
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7662,7 +7662,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_772: # %cond.load969
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7679,7 +7679,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_773: # %cond.load973
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7696,7 +7696,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_774: # %cond.load977
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7713,7 +7713,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_775: # %cond.load981
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7730,7 +7730,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_776: # %cond.load985
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7747,7 +7747,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_777: # %cond.load989
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7764,7 +7764,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_778: # %cond.load993
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7781,7 +7781,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_779: # %cond.load997
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7798,7 +7798,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_780: # %cond.load1001
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7815,7 +7815,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_781: # %cond.load1005
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -11239,7 +11239,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_62: # %cond.load241
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -11521,7 +11521,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_128: # %cond.load497
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -11803,7 +11803,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_194: # %cond.load753
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -12085,7 +12085,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_260: # %cond.load1009
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -13225,7 +13225,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_3
; CHECK-RV64-NEXT: .LBB61_529: # %cond.load5
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13238,7 +13238,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_4
; CHECK-RV64-NEXT: .LBB61_530: # %cond.load9
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13251,7 +13251,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_5
; CHECK-RV64-NEXT: .LBB61_531: # %cond.load13
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 5, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13264,7 +13264,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_6
; CHECK-RV64-NEXT: .LBB61_532: # %cond.load17
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13277,7 +13277,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_7
; CHECK-RV64-NEXT: .LBB61_533: # %cond.load21
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13290,7 +13290,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_8
; CHECK-RV64-NEXT: .LBB61_534: # %cond.load25
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13303,7 +13303,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_9
; CHECK-RV64-NEXT: .LBB61_535: # %cond.load29
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 9, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13316,7 +13316,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_10
; CHECK-RV64-NEXT: .LBB61_536: # %cond.load33
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13329,7 +13329,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_11
; CHECK-RV64-NEXT: .LBB61_537: # %cond.load37
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13342,7 +13342,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_12
; CHECK-RV64-NEXT: .LBB61_538: # %cond.load41
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13355,7 +13355,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_13
; CHECK-RV64-NEXT: .LBB61_539: # %cond.load45
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 13, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13368,7 +13368,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_14
; CHECK-RV64-NEXT: .LBB61_540: # %cond.load49
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13381,7 +13381,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_15
; CHECK-RV64-NEXT: .LBB61_541: # %cond.load53
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13394,7 +13394,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_16
; CHECK-RV64-NEXT: .LBB61_542: # %cond.load57
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13407,7 +13407,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_17
; CHECK-RV64-NEXT: .LBB61_543: # %cond.load61
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 17, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13420,7 +13420,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_18
; CHECK-RV64-NEXT: .LBB61_544: # %cond.load65
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 18, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13433,7 +13433,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_19
; CHECK-RV64-NEXT: .LBB61_545: # %cond.load69
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 19, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13446,7 +13446,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_20
; CHECK-RV64-NEXT: .LBB61_546: # %cond.load73
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 20, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13459,7 +13459,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_21
; CHECK-RV64-NEXT: .LBB61_547: # %cond.load77
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 21, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13472,7 +13472,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_22
; CHECK-RV64-NEXT: .LBB61_548: # %cond.load81
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 22, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13485,7 +13485,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_23
; CHECK-RV64-NEXT: .LBB61_549: # %cond.load85
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 23, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13498,7 +13498,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_24
; CHECK-RV64-NEXT: .LBB61_550: # %cond.load89
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 24, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13511,7 +13511,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_25
; CHECK-RV64-NEXT: .LBB61_551: # %cond.load93
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 25, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13524,7 +13524,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_26
; CHECK-RV64-NEXT: .LBB61_552: # %cond.load97
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 26, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13537,7 +13537,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_27
; CHECK-RV64-NEXT: .LBB61_553: # %cond.load101
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 27, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13550,7 +13550,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_28
; CHECK-RV64-NEXT: .LBB61_554: # %cond.load105
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 28, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13563,7 +13563,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_29
; CHECK-RV64-NEXT: .LBB61_555: # %cond.load109
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 29, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13576,7 +13576,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_30
; CHECK-RV64-NEXT: .LBB61_556: # %cond.load113
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 30, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13589,7 +13589,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_31
; CHECK-RV64-NEXT: .LBB61_557: # %cond.load117
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 31, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13603,7 +13603,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_558: # %cond.load121
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13619,7 +13619,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_559: # %cond.load125
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13636,7 +13636,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_560: # %cond.load129
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13653,7 +13653,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_561: # %cond.load133
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13670,7 +13670,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_562: # %cond.load137
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13687,7 +13687,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_563: # %cond.load141
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13704,7 +13704,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_564: # %cond.load145
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13721,7 +13721,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_565: # %cond.load149
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13738,7 +13738,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_566: # %cond.load153
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13755,7 +13755,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_567: # %cond.load157
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13772,7 +13772,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_568: # %cond.load161
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13789,7 +13789,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_569: # %cond.load165
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13806,7 +13806,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_570: # %cond.load169
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13823,7 +13823,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_571: # %cond.load173
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13840,7 +13840,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_572: # %cond.load177
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13857,7 +13857,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_573: # %cond.load181
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13874,7 +13874,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_574: # %cond.load185
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13891,7 +13891,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_575: # %cond.load189
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13908,7 +13908,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_576: # %cond.load193
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13925,7 +13925,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_577: # %cond.load197
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13942,7 +13942,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_578: # %cond.load201
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13959,7 +13959,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_579: # %cond.load205
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13976,7 +13976,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_580: # %cond.load209
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13993,7 +13993,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_581: # %cond.load213
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14010,7 +14010,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_582: # %cond.load217
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14027,7 +14027,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_583: # %cond.load221
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14044,7 +14044,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_584: # %cond.load225
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14061,7 +14061,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_585: # %cond.load229
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14078,7 +14078,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_586: # %cond.load233
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14095,7 +14095,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_587: # %cond.load237
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14128,7 +14128,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_589: # %cond.load253
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14145,7 +14145,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_590: # %cond.load257
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14162,7 +14162,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_591: # %cond.load261
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14179,7 +14179,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_592: # %cond.load265
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14196,7 +14196,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_593: # %cond.load269
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14213,7 +14213,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_594: # %cond.load273
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14230,7 +14230,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_595: # %cond.load277
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14247,7 +14247,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_596: # %cond.load281
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14264,7 +14264,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_597: # %cond.load285
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14281,7 +14281,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_598: # %cond.load289
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14298,7 +14298,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_599: # %cond.load293
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14315,7 +14315,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_600: # %cond.load297
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14332,7 +14332,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_601: # %cond.load301
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14349,7 +14349,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_602: # %cond.load305
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14366,7 +14366,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_603: # %cond.load309
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14383,7 +14383,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_604: # %cond.load313
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14400,7 +14400,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_605: # %cond.load317
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14417,7 +14417,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_606: # %cond.load321
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14434,7 +14434,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_607: # %cond.load325
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14451,7 +14451,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_608: # %cond.load329
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14468,7 +14468,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_609: # %cond.load333
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14485,7 +14485,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_610: # %cond.load337
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14502,7 +14502,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_611: # %cond.load341
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14519,7 +14519,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_612: # %cond.load345
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14536,7 +14536,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_613: # %cond.load349
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14553,7 +14553,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_614: # %cond.load353
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14570,7 +14570,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_615: # %cond.load357
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14587,7 +14587,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_616: # %cond.load361
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14604,7 +14604,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_617: # %cond.load365
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14621,7 +14621,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_618: # %cond.load369
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14638,7 +14638,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_619: # %cond.load373
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14655,7 +14655,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_620: # %cond.load377
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14672,7 +14672,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_621: # %cond.load381
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14689,7 +14689,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_622: # %cond.load385
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14706,7 +14706,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_623: # %cond.load389
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14723,7 +14723,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_624: # %cond.load393
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14740,7 +14740,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_625: # %cond.load397
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14757,7 +14757,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_626: # %cond.load401
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14774,7 +14774,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_627: # %cond.load405
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14791,7 +14791,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_628: # %cond.load409
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14808,7 +14808,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_629: # %cond.load413
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14825,7 +14825,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_630: # %cond.load417
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14842,7 +14842,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_631: # %cond.load421
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14859,7 +14859,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_632: # %cond.load425
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14876,7 +14876,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_633: # %cond.load429
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14893,7 +14893,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_634: # %cond.load433
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14910,7 +14910,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_635: # %cond.load437
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14927,7 +14927,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_636: # %cond.load441
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14944,7 +14944,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_637: # %cond.load445
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14961,7 +14961,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_638: # %cond.load449
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14978,7 +14978,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_639: # %cond.load453
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14995,7 +14995,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_640: # %cond.load457
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15012,7 +15012,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_641: # %cond.load461
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15029,7 +15029,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_642: # %cond.load465
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15046,7 +15046,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_643: # %cond.load469
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15063,7 +15063,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_644: # %cond.load473
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15080,7 +15080,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_645: # %cond.load477
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15097,7 +15097,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_646: # %cond.load481
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15114,7 +15114,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_647: # %cond.load485
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15131,7 +15131,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_648: # %cond.load489
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15148,7 +15148,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_649: # %cond.load493
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15181,7 +15181,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_651: # %cond.load509
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15198,7 +15198,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_652: # %cond.load513
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15215,7 +15215,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_653: # %cond.load517
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15232,7 +15232,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_654: # %cond.load521
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15249,7 +15249,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_655: # %cond.load525
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15266,7 +15266,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_656: # %cond.load529
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15283,7 +15283,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_657: # %cond.load533
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15300,7 +15300,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_658: # %cond.load537
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15317,7 +15317,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_659: # %cond.load541
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15334,7 +15334,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_660: # %cond.load545
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15351,7 +15351,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_661: # %cond.load549
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15368,7 +15368,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_662: # %cond.load553
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15385,7 +15385,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_663: # %cond.load557
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15402,7 +15402,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_664: # %cond.load561
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15419,7 +15419,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_665: # %cond.load565
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15436,7 +15436,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_666: # %cond.load569
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15453,7 +15453,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_667: # %cond.load573
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15470,7 +15470,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_668: # %cond.load577
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15487,7 +15487,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_669: # %cond.load581
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15504,7 +15504,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_670: # %cond.load585
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15521,7 +15521,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_671: # %cond.load589
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15538,7 +15538,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_672: # %cond.load593
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15555,7 +15555,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_673: # %cond.load597
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15572,7 +15572,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_674: # %cond.load601
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15589,7 +15589,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_675: # %cond.load605
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15606,7 +15606,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_676: # %cond.load609
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15623,7 +15623,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_677: # %cond.load613
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15640,7 +15640,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_678: # %cond.load617
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15657,7 +15657,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_679: # %cond.load621
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15674,7 +15674,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_680: # %cond.load625
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15691,7 +15691,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_681: # %cond.load629
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15708,7 +15708,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_682: # %cond.load633
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15725,7 +15725,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_683: # %cond.load637
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15742,7 +15742,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_684: # %cond.load641
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15759,7 +15759,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_685: # %cond.load645
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15776,7 +15776,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_686: # %cond.load649
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15793,7 +15793,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_687: # %cond.load653
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15810,7 +15810,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_688: # %cond.load657
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15827,7 +15827,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_689: # %cond.load661
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15844,7 +15844,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_690: # %cond.load665
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15861,7 +15861,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_691: # %cond.load669
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15878,7 +15878,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_692: # %cond.load673
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15895,7 +15895,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_693: # %cond.load677
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15912,7 +15912,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_694: # %cond.load681
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15929,7 +15929,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_695: # %cond.load685
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15946,7 +15946,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_696: # %cond.load689
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15963,7 +15963,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_697: # %cond.load693
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15980,7 +15980,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_698: # %cond.load697
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15997,7 +15997,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_699: # %cond.load701
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16014,7 +16014,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_700: # %cond.load705
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16031,7 +16031,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_701: # %cond.load709
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16048,7 +16048,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_702: # %cond.load713
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16065,7 +16065,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_703: # %cond.load717
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16082,7 +16082,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_704: # %cond.load721
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16099,7 +16099,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_705: # %cond.load725
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16116,7 +16116,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_706: # %cond.load729
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16133,7 +16133,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_707: # %cond.load733
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16150,7 +16150,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_708: # %cond.load737
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16167,7 +16167,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_709: # %cond.load741
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16184,7 +16184,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_710: # %cond.load745
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16201,7 +16201,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_711: # %cond.load749
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16234,7 +16234,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_713: # %cond.load765
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16251,7 +16251,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_714: # %cond.load769
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16268,7 +16268,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_715: # %cond.load773
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16285,7 +16285,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_716: # %cond.load777
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16302,7 +16302,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_717: # %cond.load781
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16319,7 +16319,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_718: # %cond.load785
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16336,7 +16336,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_719: # %cond.load789
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16353,7 +16353,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_720: # %cond.load793
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16370,7 +16370,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_721: # %cond.load797
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16387,7 +16387,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_722: # %cond.load801
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16404,7 +16404,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_723: # %cond.load805
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16421,7 +16421,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_724: # %cond.load809
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16438,7 +16438,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_725: # %cond.load813
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16455,7 +16455,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_726: # %cond.load817
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16472,7 +16472,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_727: # %cond.load821
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16489,7 +16489,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_728: # %cond.load825
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16506,7 +16506,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_729: # %cond.load829
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16523,7 +16523,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_730: # %cond.load833
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16540,7 +16540,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_731: # %cond.load837
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16557,7 +16557,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_732: # %cond.load841
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16574,7 +16574,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_733: # %cond.load845
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16591,7 +16591,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_734: # %cond.load849
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16608,7 +16608,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_735: # %cond.load853
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16625,7 +16625,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_736: # %cond.load857
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16642,7 +16642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_737: # %cond.load861
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16659,7 +16659,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_738: # %cond.load865
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16676,7 +16676,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_739: # %cond.load869
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16693,7 +16693,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_740: # %cond.load873
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16710,7 +16710,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_741: # %cond.load877
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16727,7 +16727,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_742: # %cond.load881
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16744,7 +16744,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_743: # %cond.load885
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16761,7 +16761,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_744: # %cond.load889
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16778,7 +16778,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_745: # %cond.load893
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16795,7 +16795,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_746: # %cond.load897
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16812,7 +16812,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_747: # %cond.load901
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16829,7 +16829,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_748: # %cond.load905
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16846,7 +16846,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_749: # %cond.load909
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16863,7 +16863,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_750: # %cond.load913
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16880,7 +16880,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_751: # %cond.load917
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16897,7 +16897,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_752: # %cond.load921
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16914,7 +16914,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_753: # %cond.load925
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16931,7 +16931,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_754: # %cond.load929
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16948,7 +16948,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_755: # %cond.load933
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16965,7 +16965,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_756: # %cond.load937
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16982,7 +16982,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_757: # %cond.load941
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16999,7 +16999,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_758: # %cond.load945
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17016,7 +17016,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_759: # %cond.load949
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17033,7 +17033,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_760: # %cond.load953
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17050,7 +17050,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_761: # %cond.load957
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17067,7 +17067,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_762: # %cond.load961
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17084,7 +17084,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_763: # %cond.load965
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17101,7 +17101,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_764: # %cond.load969
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17118,7 +17118,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_765: # %cond.load973
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17135,7 +17135,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_766: # %cond.load977
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17152,7 +17152,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_767: # %cond.load981
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17169,7 +17169,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_768: # %cond.load985
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17186,7 +17186,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_769: # %cond.load989
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17203,7 +17203,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_770: # %cond.load993
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17220,7 +17220,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_771: # %cond.load997
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17237,7 +17237,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_772: # %cond.load1001
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17254,7 +17254,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_773: # %cond.load1005
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index cf3bf00e307b8e..a983e8b99ebe7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -13,7 +13,7 @@ define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
@@ -31,7 +31,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
@@ -41,7 +41,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
@@ -51,7 +51,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
@@ -69,7 +69,7 @@ define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec)
define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -87,7 +87,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
@@ -97,7 +97,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -107,7 +107,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
@@ -125,7 +125,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
@@ -135,7 +135,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
@@ -145,7 +145,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
@@ -155,7 +155,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -165,7 +165,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_10:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
@@ -175,7 +175,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v14
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
@@ -185,7 +185,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_14:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v15
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
@@ -239,7 +239,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_15(<vscale x 16 x i32> %vec)
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
@@ -303,7 +303,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_8(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
@@ -374,7 +374,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_2(<vscale x 16 x half> %vec
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_4(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
@@ -522,7 +522,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv16bf16_2(<vscale x 16 x bfloat
define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv16bf16_4(<vscale x 16 x bfloat> %vec) {
; CHECK-LABEL: extract_nxv2bf16_nxv16bf16_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv16bf16(<vscale x 16 x bfloat> %vec, i64 4)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index 0f6eb79616c1d8..114efd12d6f24c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -16,7 +16,7 @@ define <512 x i8> @single_source(<512 x i8> %a) {
; CHECK-NEXT: addi s0, sp, 1536
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: andi sp, sp, -512
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: addi a1, sp, 512
@@ -105,7 +105,7 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: addi s0, sp, 1536
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: andi sp, sp, -512
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: addi a1, sp, 512
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 8a1a5931771373..59f060d1bd7da7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -1659,7 +1659,7 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v8
; RV32-NEXT: lui a2, 1044480
; RV32-NEXT: lui a3, 61681
@@ -2056,7 +2056,7 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v8
; RV32-NEXT: lui a2, 1044480
; RV32-NEXT: lui a3, 61681
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index d9071b3da03caa..6ee630f5b9a8c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -180,7 +180,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 2
; CHECK-NEXT: vmv8r.v v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index d2d996ac6e3ea8..b2bd974da34f40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -180,7 +180,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 2
; CHECK-NEXT: vmv8r.v v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index bedde48358d615..515f94fb3d42d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_ceil_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
;
; ZVFHMIN-LABEL: vp_ceil_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.ceil.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.ceil.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index 46f74b59b96586..6edbc6afe0410a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1796,7 +1796,7 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v16
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: lui a2, 209715
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 3bb11b6a5f2e02..b3cf6fbd1bd767 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_floor_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_floor_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.floor.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.floor.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index 435dfcebc0f9d9..ed71a9f0d7016a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -13,7 +13,7 @@ declare <2 x half> @llvm.vp.maximum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -27,7 +27,7 @@ define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -85,7 +85,7 @@ declare <4 x half> @llvm.vp.maximum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -99,7 +99,7 @@ define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -157,7 +157,7 @@ declare <8 x half> @llvm.vp.maximum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -171,7 +171,7 @@ define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -231,7 +231,7 @@ declare <16 x half> @llvm.vp.maximum.v16f16(<16 x half>, <16 x half>, <16 x i1>,
define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -247,7 +247,7 @@ define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
;
; ZVFHMIN-LABEL: vfmax_vv_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -307,7 +307,7 @@ declare <2 x float> @llvm.vp.maximum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i
define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -342,7 +342,7 @@ declare <4 x float> @llvm.vp.maximum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i
define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -377,7 +377,7 @@ declare <8 x float> @llvm.vp.maximum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i
define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -414,7 +414,7 @@ declare <16 x float> @llvm.vp.maximum.v16f32(<16 x float>, <16 x float>, <16 x i
define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -451,7 +451,7 @@ declare <2 x double> @llvm.vp.maximum.v2f64(<2 x double>, <2 x double>, <2 x i1>
define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -486,7 +486,7 @@ declare <4 x double> @llvm.vp.maximum.v4f64(<4 x double>, <4 x double>, <4 x i1>
define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -523,7 +523,7 @@ declare <8 x double> @llvm.vp.maximum.v8f64(<8 x double>, <8 x double>, <8 x i1>
define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -566,7 +566,7 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -618,7 +618,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index e3c80b050f1132..fdd02f84d413b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -13,7 +13,7 @@ declare <2 x half> @llvm.vp.minimum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -27,7 +27,7 @@ define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -85,7 +85,7 @@ declare <4 x half> @llvm.vp.minimum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -99,7 +99,7 @@ define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -157,7 +157,7 @@ declare <8 x half> @llvm.vp.minimum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -171,7 +171,7 @@ define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -231,7 +231,7 @@ declare <16 x half> @llvm.vp.minimum.v16f16(<16 x half>, <16 x half>, <16 x i1>,
define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -247,7 +247,7 @@ define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -307,7 +307,7 @@ declare <2 x float> @llvm.vp.minimum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i
define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -342,7 +342,7 @@ declare <4 x float> @llvm.vp.minimum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i
define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -377,7 +377,7 @@ declare <8 x float> @llvm.vp.minimum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i
define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -414,7 +414,7 @@ declare <16 x float> @llvm.vp.minimum.v16f32(<16 x float>, <16 x float>, <16 x i
define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -451,7 +451,7 @@ declare <2 x double> @llvm.vp.minimum.v2f64(<2 x double>, <2 x double>, <2 x i1>
define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -486,7 +486,7 @@ declare <4 x double> @llvm.vp.minimum.v4f64(<4 x double>, <4 x double>, <4 x i1>
define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -523,7 +523,7 @@ declare <8 x double> @llvm.vp.minimum.v8f64(<8 x double>, <8 x double>, <8 x i1>
define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -566,7 +566,7 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -618,7 +618,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index 72397f64275d6b..c1acf789a7b428 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -38,7 +38,7 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; V128-LABEL: interleave_v2f64:
; V128: # %bb.0:
-; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; V128-NEXT: vmv1r.v v12, v9
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vid.v v9
@@ -243,7 +243,7 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: sub sp, sp, a0
; V128-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; V128-NEXT: vmv8r.v v24, v16
; V128-NEXT: vmv8r.v v16, v8
; V128-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
index 88994afc1d7c93..1b37e4af1bba03 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
@@ -97,7 +97,7 @@ declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i3
define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_v32f32_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
index d929be2f2371aa..1c4cf15c07f272 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
@@ -712,7 +712,7 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 9febb7db0ad08a..872f7ebf245f8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -133,7 +133,7 @@ define <vscale x 2 x i32> @insert_nxv8i32_v4i32_0(<vscale x 2 x i32> %vec, <4 x
;
; VLS-LABEL: insert_nxv8i32_v4i32_0:
; VLS: # %bb.0:
-; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v8, v9
; VLS-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> %vec, <4 x i32> %subvec, i64 0)
@@ -144,7 +144,7 @@ define <vscale x 2 x i32> @insert_nxv8i32_v4i32_0(<vscale x 2 x i32> %vec, <4 x
define <4 x i32> @insert_v4i32_v4i32_0(<4 x i32> %vec, <4 x i32> %subvec) {
; CHECK-LABEL: insert_v4i32_v4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vector.insert.v4i32.v4i32(<4 x i32> %vec, <4 x i32> %subvec, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index f27497c8ac43b2..215097b6f220cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -51,7 +51,7 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) {
define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; V128-LABEL: interleave_v2i64:
; V128: # %bb.0:
-; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; V128-NEXT: vmv1r.v v12, v9
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vid.v v9
@@ -412,7 +412,7 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: sub sp, sp, a0
; V128-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; V128-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; V128-NEXT: vmv8r.v v24, v16
; V128-NEXT: vmv8r.v v16, v8
; V128-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index d3bc60d39ed1cb..1d85293003b333 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -556,13 +556,13 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
define <4 x i8> @mgather_falsemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i8:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -779,7 +779,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB12_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB12_14: # %cond.load4
@@ -1252,13 +1252,13 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
define <4 x i16> @mgather_falsemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i16:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -1486,7 +1486,7 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB23_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB23_14: # %cond.load4
@@ -1635,7 +1635,7 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB24_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB24_14: # %cond.load4
@@ -1788,7 +1788,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB25_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load4
@@ -1935,7 +1935,7 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB26_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB26_14: # %cond.load4
@@ -2300,13 +2300,13 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
define <4 x i32> @mgather_falsemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i32:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -2533,7 +2533,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB35_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB35_14: # %cond.load4
@@ -2681,7 +2681,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB36_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB36_14: # %cond.load4
@@ -2836,7 +2836,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB37_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load4
@@ -2989,7 +2989,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB38_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB38_14: # %cond.load4
@@ -3138,7 +3138,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB39_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB39_14: # %cond.load4
@@ -3294,7 +3294,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB40_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB40_14: # %cond.load4
@@ -3440,7 +3440,7 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB41_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB41_14: # %cond.load4
@@ -3792,13 +3792,13 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
define <4 x i64> @mgather_falsemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
; RV32V-LABEL: mgather_falsemask_v4i64:
; RV32V: # %bb.0:
-; RV32V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32V-NEXT: vmv2r.v v8, v10
; RV32V-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i64:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv2r.v v8, v10
; RV64V-NEXT: ret
;
@@ -7085,13 +7085,13 @@ define <4 x bfloat> @mgather_truemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %pass
define <4 x bfloat> @mgather_falsemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %passthru) {
; RV32-LABEL: mgather_falsemask_v4bf16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4bf16:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -7319,7 +7319,7 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB64_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB64_14: # %cond.load4
@@ -7468,7 +7468,7 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB65_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB65_14: # %cond.load4
@@ -7621,7 +7621,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB66_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load4
@@ -7768,7 +7768,7 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB67_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB67_14: # %cond.load4
@@ -8097,13 +8097,13 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
define <4 x half> @mgather_falsemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_v4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f16:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -8424,7 +8424,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB74_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB74_14: # %cond.load4
@@ -8548,7 +8548,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_14: # %cond.load4
@@ -8697,7 +8697,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB75_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB75_14: # %cond.load4
@@ -8821,7 +8821,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_14: # %cond.load4
@@ -8974,7 +8974,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB76_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB76_14: # %cond.load4
@@ -9106,7 +9106,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_14: # %cond.load4
@@ -9253,7 +9253,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB77_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB77_14: # %cond.load4
@@ -9369,7 +9369,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_14: # %cond.load4
@@ -9606,13 +9606,13 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
define <4 x float> @mgather_falsemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f32:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -9839,7 +9839,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB84_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB84_14: # %cond.load4
@@ -9987,7 +9987,7 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB85_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB85_14: # %cond.load4
@@ -10142,7 +10142,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB86_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB86_14: # %cond.load4
@@ -10295,7 +10295,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB87_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB87_14: # %cond.load4
@@ -10444,7 +10444,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB88_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB88_14: # %cond.load4
@@ -10600,7 +10600,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB89_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB89_14: # %cond.load4
@@ -10746,7 +10746,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB90_13: # %else20
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB90_14: # %cond.load4
@@ -11056,13 +11056,13 @@ define <4 x double> @mgather_truemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passt
define <4 x double> @mgather_falsemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passthru) {
; RV32V-LABEL: mgather_falsemask_v4f64:
; RV32V: # %bb.0:
-; RV32V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32V-NEXT: vmv2r.v v8, v10
; RV32V-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f64:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64V-NEXT: vmv2r.v v8, v10
; RV64V-NEXT: ret
;
@@ -13623,7 +13623,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15
; RV64ZVE32F-NEXT: .LBB107_24: # %else44
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB107_25: # %cond.load4
@@ -14010,7 +14010,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 31
; RV64ZVE32F-NEXT: .LBB108_48: # %else92
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB108_49: # %cond.load4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
index dd746f8cd92b7f..c4f78288ef110b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -318,7 +318,7 @@ define <128 x i16> @masked_load_v128i16(ptr %a, <128 x i1> %mask) {
define <256 x i8> @masked_load_v256i8(ptr %a, <256 x i1> %mask) {
; CHECK-LABEL: masked_load_v256i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 1ace69a5c3cccd..b32c8a9dbcf368 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -135,7 +135,7 @@ declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -264,7 +264,7 @@ declare <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -309,7 +309,7 @@ declare <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -396,7 +396,7 @@ declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -441,7 +441,7 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -486,7 +486,7 @@ declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -531,7 +531,7 @@ declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -576,7 +576,7 @@ declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
index b278c89042c683..1d1a97e7a72c30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -23,7 +23,7 @@ declare i1 @llvm.vp.reduce.or.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -40,7 +40,7 @@ declare i1 @llvm.vp.reduce.xor.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -73,7 +73,7 @@ declare i1 @llvm.vp.reduce.or.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -90,7 +90,7 @@ declare i1 @llvm.vp.reduce.xor.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -123,7 +123,7 @@ declare i1 @llvm.vp.reduce.or.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -140,7 +140,7 @@ declare i1 @llvm.vp.reduce.xor.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -173,7 +173,7 @@ declare i1 @llvm.vp.reduce.or.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -190,7 +190,7 @@ declare i1 @llvm.vp.reduce.xor.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -239,7 +239,7 @@ declare i1 @llvm.vp.reduce.and.v256i1(i1, <256 x i1>, <256 x i1>, i32)
define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v256i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v9
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: li a3, 128
@@ -248,7 +248,7 @@ define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB14_2:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmnot.m v9, v9
@@ -275,7 +275,7 @@ declare i1 @llvm.vp.reduce.or.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -292,7 +292,7 @@ declare i1 @llvm.vp.reduce.xor.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -309,7 +309,7 @@ declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -326,7 +326,7 @@ declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -343,7 +343,7 @@ declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -360,7 +360,7 @@ declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -377,7 +377,7 @@ declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -506,7 +506,7 @@ declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -523,7 +523,7 @@ declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -540,7 +540,7 @@ declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -557,7 +557,7 @@ declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -574,7 +574,7 @@ declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -591,7 +591,7 @@ declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -608,7 +608,7 @@ declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -625,7 +625,7 @@ declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -642,7 +642,7 @@ declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -659,7 +659,7 @@ declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -676,7 +676,7 @@ declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -693,7 +693,7 @@ declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -710,7 +710,7 @@ declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -727,7 +727,7 @@ declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 237b871648de41..b6157da37e4311 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -123,7 +123,7 @@ declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -240,7 +240,7 @@ declare <8 x float> @llvm.vp.rint.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -281,7 +281,7 @@ declare <16 x float> @llvm.vp.rint.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -360,7 +360,7 @@ declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -401,7 +401,7 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -442,7 +442,7 @@ declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -483,7 +483,7 @@ declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -524,7 +524,7 @@ declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index d308023c507a8b..4afb96f211a880 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_round_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_round_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.round.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.round.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index f5374a377f1dc8..ef7cb78ff03a71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_roundeven_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
;
; ZVFHMIN-LABEL: vp_roundeven_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.roundeven.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.roundeven.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index b3cc29e914a3fd..29bffdf43289df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
;
; ZVFHMIN-LABEL: vp_roundtozero_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
;
; ZVFHMIN-LABEL: vp_roundtozero_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index e060062b5e3828..db28e6a8ad2e7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -598,7 +598,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -649,7 +649,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_v256i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -679,7 +679,7 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z
define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_v256i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 16e54255b460da..96012c21bc5671 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -8,7 +8,7 @@
define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
; VLA-LABEL: concat_2xv4i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLA-NEXT: vmv1r.v v10, v9
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT: vslideup.vi v8, v10, 4
@@ -33,7 +33,7 @@ define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
;
; VLS-LABEL: concat_4xv2i32:
; VLS: # %bb.0:
-; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v13, v10
; VLS-NEXT: vmv1r.v v12, v8
; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -64,7 +64,7 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x
;
; VLS-LABEL: concat_8xv1i32:
; VLS: # %bb.0:
-; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v17, v12
; VLS-NEXT: vmv1r.v v16, v8
; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
@@ -92,7 +92,7 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x
define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
; VLA-LABEL: concat_2xv8i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLA-NEXT: vmv2r.v v12, v10
; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; VLA-NEXT: vslideup.vi v8, v12, 8
@@ -108,7 +108,7 @@ define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
define <16 x i32> @concat_4xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; VLA-LABEL: concat_4xv4i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLA-NEXT: vmv1r.v v14, v11
; VLA-NEXT: vmv1r.v v12, v10
; VLA-NEXT: vmv1r.v v10, v9
@@ -145,7 +145,7 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
;
; VLS-LABEL: concat_8xv2i32:
; VLS: # %bb.0:
-; VLS-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLS-NEXT: vmv1r.v v19, v14
; VLS-NEXT: vmv1r.v v18, v12
; VLS-NEXT: vmv1r.v v17, v10
@@ -170,7 +170,7 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
; VLA-LABEL: concat_2xv16i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLA-NEXT: vmv4r.v v16, v12
; VLA-NEXT: li a0, 32
; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -187,7 +187,7 @@ define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; VLA-LABEL: concat_4xv8i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLA-NEXT: vmv2r.v v20, v14
; VLA-NEXT: vmv2r.v v16, v12
; VLA-NEXT: vmv2r.v v12, v10
@@ -211,7 +211,7 @@ define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x
define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
; VLA-LABEL: concat_8xv4i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; VLA-NEXT: vmv1r.v v18, v15
; VLA-NEXT: vmv1r.v v20, v14
; VLA-NEXT: vmv1r.v v14, v13
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index 6f4c2d6cb64122..99d4b8880cc204 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -108,7 +108,7 @@ define <4 x i64> @m2_splat_into_identity(<4 x i64> %v1) vscale_range(2,2) {
define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m2_broadcast_i128:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: ret
%res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -118,7 +118,7 @@ define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) {
define <8 x i64> @m4_broadcast_i128(<8 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m4_broadcast_i128:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index 3cc6d814d9a49c..ec235f75c0cc3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -966,7 +966,7 @@ define <16 x i8> @reverse_v16i8_2(<8 x i8> %a, <8 x i8> %b) {
define <32 x i8> @reverse_v32i8_2(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: reverse_v32i8_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
@@ -1036,7 +1036,7 @@ define <8 x i16> @reverse_v8i16_2(<4 x i16> %a, <4 x i16> %b) {
define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: reverse_v16i16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1062,7 +1062,7 @@ define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
define <32 x i16> @reverse_v32i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: reverse_v32i16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1119,7 +1119,7 @@ define <4 x i32> @reverse_v4i32_2(<2 x i32> %a, < 2 x i32> %b) {
define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: reverse_v8i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1146,7 +1146,7 @@ define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: reverse_v16i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1175,7 +1175,7 @@ define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) {
define <32 x i32> @reverse_v32i32_2(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: reverse_v32i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1225,7 +1225,7 @@ define <4 x i64> @reverse_v4i64_2(<2 x i64> %a, < 2 x i64> %b) {
define <8 x i64> @reverse_v8i64_2(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: reverse_v8i64_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
@@ -1296,7 +1296,7 @@ define <8 x half> @reverse_v8f16_2(<4 x half> %a, <4 x half> %b) {
define <16 x half> @reverse_v16f16_2(<8 x half> %a, <8 x half> %b) {
; CHECK-LABEL: reverse_v16f16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1369,7 +1369,7 @@ define <4 x float> @reverse_v4f32_2(<2 x float> %a, <2 x float> %b) {
define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: reverse_v8f32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1396,7 +1396,7 @@ define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
define <16 x float> @reverse_v16f32_2(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: reverse_v16f32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1440,7 +1440,7 @@ define <4 x double> @reverse_v4f64_2(<2 x double> %a, < 2 x double> %b) {
define <8 x double> @reverse_v8f64_2(<4 x double> %a, <4 x double> %b) {
; CHECK-LABEL: reverse_v8f64_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
index 8fecbaacaead44..f0360c2bdd20b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
@@ -415,7 +415,7 @@ define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert3(<4 x i8> %v, i8 %b) {
define <2 x i8> @vslide1up_4xi8_neg_length_changing(<4 x i8> %v, i8 %b) {
; CHECK-LABEL: vslide1up_4xi8_neg_length_changing:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index df08d19b187187..d98396538e8ee0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -62,7 +62,7 @@ define void @gather_masked(ptr noalias nocapture %A, ptr noalias nocapture reado
; CHECK-NEXT: li a4, 5
; CHECK-NEXT: .LBB1_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vlse8.v v9, (a1), a4, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index db13b174b30983..d65957b7a86ae3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -542,7 +542,7 @@ declare <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr, i32, <
define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind {
; CHECK-LABEL: strided_vpload_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: mv a3, a2
@@ -599,7 +599,7 @@ declare <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr, i32,
define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_v33f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: li a5, 32
; CHECK-RV32-NEXT: mv a3, a4
@@ -650,7 +650,7 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
;
; CHECK-RV64-LABEL: strided_load_v33f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: li a5, 32
; CHECK-RV64-NEXT: mv a4, a3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 6ae811a33b5935..61a4c4f6bc3d80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -53,7 +53,7 @@ declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32)
define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
@@ -232,7 +232,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: mul a2, a2, a3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 72 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a3, 24
@@ -543,7 +543,7 @@ declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 023d954a4f36ee..a6e2b04d3b2712 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -105,7 +105,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV32-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV32-SLOW-NEXT: .LBB4_4: # %else2
-; RV32-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-SLOW-NEXT: vmv1r.v v8, v9
; RV32-SLOW-NEXT: ret
;
@@ -139,7 +139,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV64-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV64-SLOW-NEXT: .LBB4_4: # %else2
-; RV64-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
;
@@ -191,7 +191,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV32-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV32-SLOW-NEXT: .LBB5_4: # %else2
-; RV32-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-SLOW-NEXT: vmv1r.v v8, v9
; RV32-SLOW-NEXT: ret
;
@@ -223,7 +223,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: vmv.s.x v8, a0
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV64-SLOW-NEXT: .LBB5_4: # %else2
-; RV64-SLOW-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index c1e91850f053fc..1dba245b6769c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -363,7 +363,7 @@ declare <256 x i8> @llvm.vp.add.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index ccd9f291a8659f..ee650d46cedb07 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -267,7 +267,7 @@ declare <256 x i8> @llvm.vp.smax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index b2969eeed05678..34d7ce30d245c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -266,7 +266,7 @@ declare <256 x i8> @llvm.vp.umax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index 6464749e70181f..88258d93592abe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -267,7 +267,7 @@ declare <256 x i8> @llvm.vp.smin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index 50e95053574973..31d9f83a844e71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -266,7 +266,7 @@ declare <256 x i8> @llvm.vp.umin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 0fceb00daa3ca7..33093e53062e7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2617,7 +2617,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_v32f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v16, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index 6104115862adcd..c64c77d2bdf034 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -394,7 +394,7 @@ declare <33 x double> @llvm.vp.load.v33f64.p0(ptr, <33 x i1>, i32)
define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v33f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: mv a3, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index ac93a306dd877b..2f659dfbe8f73b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1181,7 +1181,7 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 44c42ad7d638ba..3edba04af25595 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -372,7 +372,7 @@ declare <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index cffe69d2222429..e7a553fc61689b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -368,7 +368,7 @@ declare <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index 0d541917e9f6c5..59ef583a382d14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -163,7 +163,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v8
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: li a2, 128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 75db312b0c6e8c..7e703fafa9ab4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -384,7 +384,7 @@ declare <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: addi a3, a1, -128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 03c3c93cfca54f..125b6ba22ebb86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -379,7 +379,7 @@ declare <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: addi a3, a1, -128
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index 18911a5f7568bb..ae033fe94716fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.floor.nxv4bf16(<vscale x 4 x bfloat>, <vs
define <vscale x 4 x bfloat> @vp_floor_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.floor.nxv8bf16(<vscale x 8 x bfloat>, <vs
define <vscale x 8 x bfloat> @vp_floor_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.floor.nxv16bf16(<vscale x 16 x bfloat>,
define <vscale x 16 x bfloat> @vp_floor_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_floor_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_floor_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_floor_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.floor.nxv4f32(<vscale x 4 x float>, <vscal
define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.floor.nxv8f32(<vscale x 8 x float>, <vscal
define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.floor.nxv16f32(<vscale x 16 x float>, <vs
define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index 616146e3675956..9269339fa001fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -153,7 +153,7 @@ define <vscale x 32 x bfloat> @vfmax_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFH-NEXT: slli a0, a0, 1
; ZVFH-NEXT: add a0, a0, a1
; ZVFH-NEXT: sub sp, sp, a0
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v24, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -229,7 +229,7 @@ define <vscale x 32 x bfloat> @vfmax_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -500,7 +500,7 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index 752c9dcf51c4b8..c51181fe133551 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -19,7 +19,7 @@ declare <vscale x 1 x bfloat> @llvm.vp.maximum.nxv1bf16(<vscale x 1 x bfloat>, <
define <vscale x 1 x bfloat> @vfmax_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -66,7 +66,7 @@ declare <vscale x 2 x bfloat> @llvm.vp.maximum.nxv2bf16(<vscale x 2 x bfloat>, <
define <vscale x 2 x bfloat> @vfmax_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -113,7 +113,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.maximum.nxv4bf16(<vscale x 4 x bfloat>, <
define <vscale x 4 x bfloat> @vfmax_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -162,7 +162,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.maximum.nxv8bf16(<vscale x 8 x bfloat>, <
define <vscale x 8 x bfloat> @vfmax_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -217,7 +217,7 @@ define <vscale x 16 x bfloat> @vfmax_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
@@ -570,7 +570,7 @@ declare <vscale x 1 x half> @llvm.vp.maximum.nxv1f16(<vscale x 1 x half>, <vscal
define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -584,7 +584,7 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -642,7 +642,7 @@ declare <vscale x 2 x half> @llvm.vp.maximum.nxv2f16(<vscale x 2 x half>, <vscal
define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -656,7 +656,7 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -714,7 +714,7 @@ declare <vscale x 4 x half> @llvm.vp.maximum.nxv4f16(<vscale x 4 x half>, <vscal
define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -728,7 +728,7 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -788,7 +788,7 @@ declare <vscale x 8 x half> @llvm.vp.maximum.nxv8f16(<vscale x 8 x half>, <vscal
define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -804,7 +804,7 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -864,7 +864,7 @@ declare <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half>, <vs
define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -886,7 +886,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
@@ -976,7 +976,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1293,7 +1293,7 @@ declare <vscale x 1 x float> @llvm.vp.maximum.nxv1f32(<vscale x 1 x float>, <vsc
define <vscale x 1 x float> @vfmax_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1328,7 +1328,7 @@ declare <vscale x 2 x float> @llvm.vp.maximum.nxv2f32(<vscale x 2 x float>, <vsc
define <vscale x 2 x float> @vfmax_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1363,7 +1363,7 @@ declare <vscale x 4 x float> @llvm.vp.maximum.nxv4f32(<vscale x 4 x float>, <vsc
define <vscale x 4 x float> @vfmax_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1400,7 +1400,7 @@ declare <vscale x 8 x float> @llvm.vp.maximum.nxv8f32(<vscale x 8 x float>, <vsc
define <vscale x 8 x float> @vfmax_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1437,7 +1437,7 @@ declare <vscale x 1 x double> @llvm.vp.maximum.nxv1f64(<vscale x 1 x double>, <v
define <vscale x 1 x double> @vfmax_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1472,7 +1472,7 @@ declare <vscale x 2 x double> @llvm.vp.maximum.nxv2f64(<vscale x 2 x double>, <v
define <vscale x 2 x double> @vfmax_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1509,7 +1509,7 @@ declare <vscale x 4 x double> @llvm.vp.maximum.nxv4f64(<vscale x 4 x double>, <v
define <vscale x 4 x double> @vfmax_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1552,7 +1552,7 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1605,7 +1605,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index ddbb513c6d9aad..6145f0c1fc3a93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -153,7 +153,7 @@ define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFH-NEXT: slli a0, a0, 1
; ZVFH-NEXT: add a0, a0, a1
; ZVFH-NEXT: sub sp, sp, a0
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v24, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -229,7 +229,7 @@ define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -500,7 +500,7 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 58377365949fe4..64df5dd4fa0745 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -19,7 +19,7 @@ declare <vscale x 1 x bfloat> @llvm.vp.minimum.nxv1bf16(<vscale x 1 x bfloat>, <
define <vscale x 1 x bfloat> @vfmin_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -66,7 +66,7 @@ declare <vscale x 2 x bfloat> @llvm.vp.minimum.nxv2bf16(<vscale x 2 x bfloat>, <
define <vscale x 2 x bfloat> @vfmin_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -113,7 +113,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.minimum.nxv4bf16(<vscale x 4 x bfloat>, <
define <vscale x 4 x bfloat> @vfmin_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -162,7 +162,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.minimum.nxv8bf16(<vscale x 8 x bfloat>, <
define <vscale x 8 x bfloat> @vfmin_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -217,7 +217,7 @@ define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
@@ -570,7 +570,7 @@ declare <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half>, <vscal
define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -584,7 +584,7 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -642,7 +642,7 @@ declare <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half>, <vscal
define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -656,7 +656,7 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -714,7 +714,7 @@ declare <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half>, <vscal
define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -728,7 +728,7 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -788,7 +788,7 @@ declare <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half>, <vscal
define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -804,7 +804,7 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -864,7 +864,7 @@ declare <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half>, <vs
define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -886,7 +886,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
@@ -976,7 +976,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1293,7 +1293,7 @@ declare <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float>, <vsc
define <vscale x 1 x float> @vfmin_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1328,7 +1328,7 @@ declare <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float>, <vsc
define <vscale x 2 x float> @vfmin_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1363,7 +1363,7 @@ declare <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float>, <vsc
define <vscale x 4 x float> @vfmin_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1400,7 +1400,7 @@ declare <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float>, <vsc
define <vscale x 8 x float> @vfmin_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1437,7 +1437,7 @@ declare <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double>, <v
define <vscale x 1 x double> @vfmin_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1472,7 +1472,7 @@ declare <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double>, <v
define <vscale x 2 x double> @vfmin_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1509,7 +1509,7 @@ declare <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double>, <v
define <vscale x 4 x double> @vfmin_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1552,7 +1552,7 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1605,7 +1605,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
index 948c8e0ecb9b95..85eb82e2b09e50 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
@@ -18,7 +18,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-NEXT: .LBB0_1: # %for.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: th.lrb a0, a1, a0, 0
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v8
; RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV32-NEXT: vmv.s.x v9, a0
@@ -46,7 +46,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: th.lrb a0, a1, a0, 0
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v8
; RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV64-NEXT: vmv.s.x v9, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index a8c8ad86452d45..64c22eac537748 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -703,7 +703,7 @@ define <vscale x 16 x i32> @fshl_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: li a0, 31
@@ -883,7 +883,7 @@ define <vscale x 7 x i64> @fshl_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64>
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: li a0, 63
@@ -955,7 +955,7 @@ define <vscale x 8 x i64> @fshl_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64>
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: li a0, 63
@@ -991,7 +991,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
@@ -1177,7 +1177,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
index 328c1ab3bddff9..758c0beb4decc1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
@@ -365,13 +365,13 @@ entry:
define <vscale x 4 x i8> @test_specify_reg_mf2(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v9
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v1, v2
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -382,13 +382,13 @@ entry:
define <vscale x 8 x i8> @test_specify_reg_m1(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v9
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v1, v2
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -399,13 +399,13 @@ entry:
define <vscale x 16 x i8> @test_specify_reg_m2(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v4, v10
; CHECK-NEXT: vmv2r.v v2, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v2, v4
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -416,7 +416,7 @@ entry:
define <vscale x 1 x i1> @test_specify_reg_mask(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mask:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v8
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: #APP
diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index 7b654d37234793..68aefd71014c6f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -5,7 +5,7 @@
define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
@@ -15,7 +15,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
@@ -25,7 +25,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
@@ -35,7 +35,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
@@ -45,7 +45,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
@@ -55,7 +55,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
@@ -92,7 +92,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 0)
@@ -102,7 +102,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 8)
@@ -112,7 +112,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
@@ -122,7 +122,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
@@ -132,7 +132,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 8)
@@ -142,7 +142,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 12)
@@ -152,7 +152,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
@@ -162,7 +162,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
@@ -172,7 +172,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
@@ -182,7 +182,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
@@ -192,7 +192,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 8)
@@ -202,7 +202,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_10:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 10)
@@ -212,7 +212,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v14, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 12)
@@ -222,7 +222,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_14:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v15, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 14)
@@ -532,7 +532,7 @@ define <vscale x 2 x i64> @insert_nxv2i64_nxv3i64(<3 x i64> %sv) #0 {
define <vscale x 8 x i32> @insert_insert_combine(<2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: ret
%inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v2i32(<vscale x 4 x i32> undef, <2 x i32> %subvec, i64 0)
@@ -545,7 +545,7 @@ define <vscale x 8 x i32> @insert_insert_combine(<2 x i32> %subvec) {
define <vscale x 8 x i32> @insert_insert_combine2(<vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: ret
%inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv2i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> %subvec, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
index 86c2c07704cfd7..a1b66771a5f692 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
@@ -55,7 +55,7 @@ declare <vscale x 8 x i64> @llvm.vp.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>,
define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv16i64_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
index 4650cea23dd00f..963886d278ba80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
@@ -117,7 +117,7 @@ define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x
;
; RV64-i64-LABEL: lrint_nxv16f32:
; RV64-i64: # %bb.0:
-; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-i64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-i64-NEXT: vmv1r.v v24, v0
; RV64-i64-NEXT: csrr a1, vlenb
; RV64-i64-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index d06c809833f81a..4888d7090a1345 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1288,7 +1288,7 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -1313,7 +1313,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -1445,7 +1445,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index ec774b6f641b43..4f75d693a3f22f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -221,13 +221,13 @@ define <vscale x 4 x i8> @mgather_truemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vsc
define <vscale x 4 x i8> @mgather_falsemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i8> %passthru)
@@ -444,13 +444,13 @@ define <vscale x 4 x i16> @mgather_truemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i16> @mgather_falsemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i16> %passthru)
@@ -690,13 +690,13 @@ define <vscale x 4 x i32> @mgather_truemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i32> @mgather_falsemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %passthru)
@@ -955,7 +955,7 @@ define <vscale x 4 x i64> @mgather_truemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i64> @mgather_falsemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i64> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i64> %passthru)
@@ -1239,7 +1239,7 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: addi a3, sp, 16
; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv8r.v v16, v8
; RV64-NEXT: vl8re64.v v24, (a0)
; RV64-NEXT: csrr a0, vlenb
@@ -1356,13 +1356,13 @@ define <vscale x 4 x bfloat> @mgather_truemask_nxv4bf16(<vscale x 4 x ptr> %ptrs
define <vscale x 4 x bfloat> @mgather_falsemask_nxv4bf16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x bfloat> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4bf16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4bf16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x bfloat> %passthru)
@@ -1559,13 +1559,13 @@ define <vscale x 4 x half> @mgather_truemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <
define <vscale x 4 x half> @mgather_falsemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x half> %passthru)
@@ -1761,13 +1761,13 @@ define <vscale x 4 x float> @mgather_truemask_nxv4f32(<vscale x 4 x ptr> %ptrs,
define <vscale x 4 x float> @mgather_falsemask_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> %passthru)
@@ -2026,7 +2026,7 @@ define <vscale x 4 x double> @mgather_truemask_nxv4f64(<vscale x 4 x ptr> %ptrs,
define <vscale x 4 x double> @mgather_falsemask_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x double> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x double> %passthru)
@@ -2332,7 +2332,7 @@ define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8>
;
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v16, v0
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 0ad841f0ebf869..549a0d88bf3e72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -2009,7 +2009,7 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv8r.v v16, v8
; RV32-NEXT: vl4re16.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index 185a91271992e3..87fe5b25972051 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -1169,7 +1169,7 @@ define <vscale x 32 x i8> @reverse_nxv32i8(<vscale x 32 x i8> %a) {
define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV32-BITS-UNKNOWN: # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8
; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
@@ -1189,7 +1189,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV32-BITS-256-LABEL: reverse_nxv64i8:
; RV32-BITS-256: # %bb.0:
-; RV32-BITS-256-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-BITS-256-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-BITS-256-NEXT: vmv8r.v v16, v8
; RV32-BITS-256-NEXT: csrr a0, vlenb
; RV32-BITS-256-NEXT: addi a0, a0, -1
@@ -1208,7 +1208,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV32-BITS-512-LABEL: reverse_nxv64i8:
; RV32-BITS-512: # %bb.0:
-; RV32-BITS-512-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-BITS-512-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-BITS-512-NEXT: vmv8r.v v16, v8
; RV32-BITS-512-NEXT: csrr a0, vlenb
; RV32-BITS-512-NEXT: addi a0, a0, -1
@@ -1227,7 +1227,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV64-BITS-UNKNOWN: # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8
; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-256-LABEL: reverse_nxv64i8:
; RV64-BITS-256: # %bb.0:
-; RV64-BITS-256-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-BITS-256-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-BITS-256-NEXT: vmv8r.v v16, v8
; RV64-BITS-256-NEXT: csrr a0, vlenb
; RV64-BITS-256-NEXT: addi a0, a0, -1
@@ -1266,7 +1266,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-512-LABEL: reverse_nxv64i8:
; RV64-BITS-512: # %bb.0:
-; RV64-BITS-512-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-BITS-512-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-BITS-512-NEXT: vmv8r.v v16, v8
; RV64-BITS-512-NEXT: csrr a0, vlenb
; RV64-BITS-512-NEXT: addi a0, a0, -1
@@ -1373,7 +1373,7 @@ define <vscale x 16 x i16> @reverse_nxv16i16(<vscale x 16 x i16> %a) {
define <vscale x 32 x i16> @reverse_nxv32i16(<vscale x 32 x i16> %a) {
; CHECK-LABEL: reverse_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1465,7 +1465,7 @@ define <vscale x 8 x i32> @reverse_nxv8i32(<vscale x 8 x i32> %a) {
define <vscale x 16 x i32> @reverse_nxv16i32(<vscale x 16 x i32> %a) {
; CHECK-LABEL: reverse_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
@@ -1541,7 +1541,7 @@ define <vscale x 4 x i64> @reverse_nxv4i64(<vscale x 4 x i64> %a) {
define <vscale x 8 x i64> @reverse_nxv8i64(<vscale x 8 x i64> %a) {
; CHECK-LABEL: reverse_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
@@ -1653,7 +1653,7 @@ define <vscale x 16 x bfloat> @reverse_nxv16bf16(<vscale x 16 x bfloat> %a) {
define <vscale x 32 x bfloat> @reverse_nxv32bf16(<vscale x 32 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1761,7 +1761,7 @@ define <vscale x 16 x half> @reverse_nxv16f16(<vscale x 16 x half> %a) {
define <vscale x 32 x half> @reverse_nxv32f16(<vscale x 32 x half> %a) {
; CHECK-LABEL: reverse_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1853,7 +1853,7 @@ define <vscale x 8 x float> @reverse_nxv8f32(<vscale x 8 x float> %a) {
define <vscale x 16 x float> @reverse_nxv16f32(<vscale x 16 x float> %a) {
; CHECK-LABEL: reverse_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
@@ -1929,7 +1929,7 @@ define <vscale x 4 x double> @reverse_nxv4f64(<vscale x 4 x double> %a) {
define <vscale x 8 x double> @reverse_nxv8f64(<vscale x 8 x double> %a) {
; CHECK-LABEL: reverse_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 4474d74007ed30..438d4cdf7d197b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.nearbyint.nxv4bf16(<vscale x 4 x bfloat>,
define <vscale x 4 x bfloat> @vp_nearbyint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.nearbyint.nxv8bf16(<vscale x 8 x bfloat>,
define <vscale x 8 x bfloat> @vp_nearbyint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.nearbyint.nxv16bf16(<vscale x 16 x bfloa
define <vscale x 16 x bfloat> @vp_nearbyint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -276,7 +276,7 @@ declare <vscale x 32 x bfloat> @llvm.vp.nearbyint.nxv32bf16(<vscale x 32 x bfloa
define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -570,7 +570,7 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -638,7 +638,7 @@ declare <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -658,7 +658,7 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -726,7 +726,7 @@ declare <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -746,7 +746,7 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -814,7 +814,7 @@ declare <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -834,7 +834,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1047,7 +1047,7 @@ declare <vscale x 4 x float> @llvm.vp.nearbyint.nxv4f32(<vscale x 4 x float>, <v
define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1092,7 +1092,7 @@ declare <vscale x 8 x float> @llvm.vp.nearbyint.nxv8f32(<vscale x 8 x float>, <v
define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1137,7 +1137,7 @@ declare <vscale x 16 x float> @llvm.vp.nearbyint.nxv16f32(<vscale x 16 x float>,
define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1224,7 +1224,7 @@ declare <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1269,7 +1269,7 @@ declare <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1314,7 +1314,7 @@ declare <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1359,7 +1359,7 @@ declare <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1405,7 +1405,7 @@ declare <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double
define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
index 2fd4233352440a..082a8e8d714c6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
@@ -23,7 +23,7 @@ define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: andi sp, sp, -64
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: addi a2, sp, 64
; CHECK-NEXT: slli a1, a1, 3
@@ -54,7 +54,7 @@ define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
define i8 @bar(<vscale x 128 x i1> %x, i64 %y) {
; CHECK-LABEL: bar:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v0, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index 72cc477a261d33..c23b71789311d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -109,7 +109,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.rint.nxv4bf16(<vscale x 4 x bfloat>, <vsc
define <vscale x 4 x bfloat> @vp_rint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -158,7 +158,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.rint.nxv8bf16(<vscale x 8 x bfloat>, <vsc
define <vscale x 8 x bfloat> @vp_rint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -207,7 +207,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.rint.nxv16bf16(<vscale x 16 x bfloat>, <
define <vscale x 16 x bfloat> @vp_rint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -262,7 +262,7 @@ define <vscale x 32 x bfloat> @vp_rint_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -539,7 +539,7 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
;
; ZVFHMIN-LABEL: vp_rint_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -601,7 +601,7 @@ declare <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -619,7 +619,7 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
;
; ZVFHMIN-LABEL: vp_rint_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -681,7 +681,7 @@ declare <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -699,7 +699,7 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_rint_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -761,7 +761,7 @@ declare <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -785,7 +785,7 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -989,7 +989,7 @@ declare <vscale x 4 x float> @llvm.vp.rint.nxv4f32(<vscale x 4 x float>, <vscale
define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1030,7 +1030,7 @@ declare <vscale x 8 x float> @llvm.vp.rint.nxv8f32(<vscale x 8 x float>, <vscale
define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1071,7 +1071,7 @@ declare <vscale x 16 x float> @llvm.vp.rint.nxv16f32(<vscale x 16 x float>, <vsc
define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1150,7 +1150,7 @@ declare <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1191,7 +1191,7 @@ declare <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1232,7 +1232,7 @@ declare <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1273,7 +1273,7 @@ declare <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1321,7 +1321,7 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index 441634e934b087..5898d1dc4dca27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.round.nxv4bf16(<vscale x 4 x bfloat>, <vs
define <vscale x 4 x bfloat> @vp_round_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.round.nxv8bf16(<vscale x 8 x bfloat>, <vs
define <vscale x 8 x bfloat> @vp_round_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.round.nxv16bf16(<vscale x 16 x bfloat>,
define <vscale x 16 x bfloat> @vp_round_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_round_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_round_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_round_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.round.nxv4f32(<vscale x 4 x float>, <vscal
define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.round.nxv8f32(<vscale x 8 x float>, <vscal
define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.round.nxv16f32(<vscale x 16 x float>, <vs
define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 786e444dafe3c1..9e86f3b873ff6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.roundeven.nxv4bf16(<vscale x 4 x bfloat>,
define <vscale x 4 x bfloat> @vp_roundeven_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.roundeven.nxv8bf16(<vscale x 8 x bfloat>,
define <vscale x 8 x bfloat> @vp_roundeven_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.roundeven.nxv16bf16(<vscale x 16 x bfloa
define <vscale x 16 x bfloat> @vp_roundeven_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16(<vscale x 32 x bfloat> %va
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_roundeven_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_roundeven_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_roundeven_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float>, <v
define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float>, <v
define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float>,
define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 9a3c7aa46f3306..c303ffc871461e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.roundtozero.nxv4bf16(<vscale x 4 x bfloat
define <vscale x 4 x bfloat> @vp_roundtozero_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.roundtozero.nxv8bf16(<vscale x 8 x bfloat
define <vscale x 8 x bfloat> @vp_roundtozero_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.roundtozero.nxv16bf16(<vscale x 16 x bfl
define <vscale x 16 x bfloat> @vp_roundtozero_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16(<vscale x 32 x bfloat> %
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half>, <v
define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half>,
define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half>,
define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.roundtozero.nxv4f32(<vscale x 4 x float>,
define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.roundtozero.nxv8f32(<vscale x 8 x float>,
define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.roundtozero.nxv16f32(<vscale x 16 x float
define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double>
define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double>
define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double>
define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double>
define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index cee838aa92585a..a88d5f1bec2468 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: slli a1, a1, 1
; SPILL-O0-NEXT: sub sp, sp, a1
; SPILL-O0-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
-; SPILL-O0-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; SPILL-O0-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; SPILL-O0-NEXT: vmv1r.v v10, v9
; SPILL-O0-NEXT: vmv1r.v v9, v8
; SPILL-O0-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index e4d907f2af568e..9b01acad94aeb2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -20,7 +20,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: slli a1, a1, 1
; SPILL-O0-NEXT: sub sp, sp, a1
; SPILL-O0-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; SPILL-O0-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; SPILL-O0-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; SPILL-O0-NEXT: vmv1r.v v10, v9
; SPILL-O0-NEXT: vmv1r.v v9, v8
; SPILL-O0-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
index de64b8e94332a6..381d1c61b99862 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -47,7 +47,7 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
; CHECK-NEXT: vs8r.v v8, (t1)
; CHECK-NEXT: sd t1, 0(sp)
; CHECK-NEXT: sd t0, 8(sp)
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: call bar
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 98a92ae551f6de..fcf2d543a070fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -941,7 +941,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
define <vscale x 2 x i32> @vredsum(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl) {
; CHECK-LABEL: vredsum:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vredsum.vs v11, v9, v10
@@ -966,7 +966,7 @@ define <vscale x 2 x float> @vfredusum(<vscale x 2 x float> %passthru, <vscale x
; CHECK-LABEL: vfredusum:
; CHECK: # %bb.0:
; CHECK-NEXT: fsrmi a1, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vfredusum.vs v11, v9, v10
@@ -1018,7 +1018,7 @@ define <vscale x 2 x float> @vfredusum_allones_mask(<vscale x 2 x float> %passth
define <vscale x 2 x i32> @unfoldable_vredsum_allones_mask_diff_vl(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: unfoldable_vredsum_allones_mask_diff_vl:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT: vredsum.vs v11, v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index da3c194652df76..1854ddbb2edd9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -1473,7 +1473,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: mv a3, a1
@@ -3721,7 +3721,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: slli a1, a1, 4
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: slli a1, a1, 3
@@ -3783,7 +3783,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a1, a1, a3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v24, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: mv a3, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 70347754f39c3b..52c6d54c1675c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1092,7 +1092,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1144,7 +1144,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -1175,7 +1175,7 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -2247,7 +2247,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -2303,7 +2303,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: srli a2, a3, 2
@@ -2337,7 +2337,7 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: srli a2, a3, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 56e2a940f388d2..a2de335e103461 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -4865,7 +4865,7 @@ declare <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32>, <4 x i32>, metadata, <4 x i1>, i
define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_icmp:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
@@ -4907,7 +4907,7 @@ declare <4 x i1> @llvm.vp.fcmp.v4f32(<4 x float>, <4 x float>, metadata, <4 x i1
define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fcmp:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index bcd5c919418f11..0c1e9c8025de36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -663,7 +663,7 @@ declare <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(
define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_nxv16f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v9, v0
; CHECK-RV32-NEXT: csrr a4, vlenb
; CHECK-RV32-NEXT: sub a2, a3, a4
@@ -689,7 +689,7 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
;
; CHECK-RV64-LABEL: strided_load_nxv16f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v9, v0
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: sub a3, a2, a4
@@ -767,7 +767,7 @@ declare <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i6
define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 zeroext %evl, ptr %hi_ptr) {
; CHECK-RV32-LABEL: strided_load_nxv17f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: csrr a2, vlenb
; CHECK-RV32-NEXT: slli a7, a2, 1
@@ -815,7 +815,7 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
;
; CHECK-RV64-LABEL: strided_load_nxv17f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: slli a7, a4, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index a71a2de34586d0..e1e79cd6061f46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -615,7 +615,7 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: slli a4, a4, 3
; CHECK-NEXT: sub sp, sp, a4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a4, sp, 16
; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
@@ -631,7 +631,7 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a7, a4
; CHECK-NEXT: .LBB48_4:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
index 418e44c213a0e9..c428b2d1249f58 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -158,7 +158,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i
define void @repeat_shuffle(<2 x double> %v, ptr noalias %q) {
; CHECK-LABEL: repeat_shuffle:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vslideup.vi v10, v8, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 2ccc4b723b170f..42b2896d9f7998 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -565,7 +565,7 @@ declare <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8>, <vscale x
define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1344,7 +1344,7 @@ declare <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32>, <vscale x
define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -1401,7 +1401,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a2, a0, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
index e26e4f8227974f..ade86910076084 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
@@ -43,7 +43,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -98,7 +98,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -139,7 +139,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -180,7 +180,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -221,7 +221,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -262,7 +262,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -303,7 +303,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1(
define iXLen @intrinsic_vcpop_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index f4617a8b0faab3..9f2b2466385eae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -7,7 +7,7 @@
define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
; CHECK-LABEL: vector_deinterleave_v16i1_v32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index b4bb23566fe289..c24a67743eb842 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -131,7 +131,7 @@ ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
define {<vscale x 64 x i8>, <vscale x 64 x i8>} @vector_deinterleave_nxv64i8_nxv128i8(<vscale x 128 x i8> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv64i8_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -147,7 +147,7 @@ ret {<vscale x 64 x i8>, <vscale x 64 x i8>} %retval
define {<vscale x 32 x i16>, <vscale x 32 x i16>} @vector_deinterleave_nxv32i16_nxv64i16(<vscale x 64 x i16> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32i16_nxv64i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -163,7 +163,7 @@ ret {<vscale x 32 x i16>, <vscale x 32 x i16>} %retval
define {<vscale x 16 x i32>, <vscale x 16 x i32>} @vector_deinterleave_nxv16i32_nxvv32i32(<vscale x 32 x i32> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv16i32_nxvv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -192,7 +192,7 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
@@ -391,7 +391,7 @@ declare {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave
define {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} @vector_deinterleave_nxv32bf16_nxv64bf16(<vscale x 64 x bfloat> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32bf16_nxv64bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -407,7 +407,7 @@ ret {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} %retval
define {<vscale x 32 x half>, <vscale x 32 x half>} @vector_deinterleave_nxv32f16_nxv64f16(<vscale x 64 x half> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32f16_nxv64f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -423,7 +423,7 @@ ret {<vscale x 32 x half>, <vscale x 32 x half>} %retval
define {<vscale x 16 x float>, <vscale x 16 x float>} @vector_deinterleave_nxv16f32_nxv32f32(<vscale x 32 x float> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv16f32_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -452,7 +452,7 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index b363f55b4f45a2..4f57d5f5868dc5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -91,7 +91,7 @@ define <8 x i32> @vector_interleave_v8i32_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: vector_interleave_v4i64_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: lui a0, 12304
; CHECK-NEXT: addi a0, a0, 512
@@ -107,7 +107,7 @@ define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; ZVBB-LABEL: vector_interleave_v4i64_v2i64:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v10, v9
; ZVBB-NEXT: lui a0, 12304
; ZVBB-NEXT: addi a0, a0, 512
@@ -241,7 +241,7 @@ define <8 x float> @vector_interleave_v8f32_v4f32(<4 x float> %a, <4 x float> %b
define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: vector_interleave_v4f64_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: lui a0, 12304
; CHECK-NEXT: addi a0, a0, 512
@@ -257,7 +257,7 @@ define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double>
;
; ZVBB-LABEL: vector_interleave_v4f64_v2f64:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v10, v9
; ZVBB-NEXT: lui a0, 12304
; ZVBB-NEXT: addi a0, a0, 512
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 99a49786fda1e8..e5271da6cea130 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -9,7 +9,7 @@
define void @vector_interleave_store_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv32i1_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 72fa511e672bf2..56893aa8111fbc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -11,7 +11,7 @@
define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -33,7 +33,7 @@ define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1>
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v9, v0
; ZVBB-NEXT: vmv1r.v v0, v8
; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, mu
@@ -162,7 +162,7 @@ declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>,
define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
@@ -206,7 +206,7 @@ define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1
define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -219,7 +219,7 @@ define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 8
@@ -235,7 +235,7 @@ define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8
define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -248,7 +248,7 @@ define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -264,7 +264,7 @@ define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i
define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -277,7 +277,7 @@ define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -584,7 +584,7 @@ declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x dou
define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
; CHECK-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -597,7 +597,7 @@ define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 3
;
; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -613,7 +613,7 @@ define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 3
define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -626,7 +626,7 @@ define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x
;
; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -642,7 +642,7 @@ define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x
define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -655,7 +655,7 @@ define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
index b58ac4d0520648..33bb3e89cb90ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -120,7 +120,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
@@ -153,7 +153,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru_negative:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
@@ -185,7 +185,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
; CHECK-LABEL: vadd_vv_mask:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
@@ -221,7 +221,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m, <vscale x 1 x i1> %m2) nounwind {
; CHECK-LABEL: vadd_vv_mask_negative:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v11, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index 7c79abb0e80987..89c4e1a112d777 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -11,7 +11,7 @@ declare <vscale x 1 x i1> @llvm.vector.splice.nxv1i1(<vscale x 1 x i1>, <vscale
define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv1i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
@@ -34,7 +34,7 @@ define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vsc
define <vscale x 1 x i1> @splice_nxv1i1_offset_max(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv1i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
@@ -61,7 +61,7 @@ declare <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale
define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
@@ -84,7 +84,7 @@ define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vsc
define <vscale x 2 x i1> @splice_nxv2i1_offset_max(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
@@ -111,7 +111,7 @@ declare <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale
define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
@@ -134,7 +134,7 @@ define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vsc
define <vscale x 4 x i1> @splice_nxv4i1_offset_max(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
@@ -161,7 +161,7 @@ declare <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale
define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
@@ -183,7 +183,7 @@ define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vsc
define <vscale x 8 x i1> @splice_nxv8i1_offset_max(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
@@ -209,7 +209,7 @@ declare <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1>, <vsca
define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -232,7 +232,7 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <
define <vscale x 16 x i1> @splice_nxv16i1_offset_max(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -259,7 +259,7 @@ declare <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1>, <vsca
define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv32i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
@@ -307,7 +307,7 @@ declare <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1>, <vsca
define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv64i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index 9b9bb20cb54981..f094d4ee175c38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -462,7 +462,7 @@ declare <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 67d28dc3369f32..5721737be3f3ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -411,7 +411,7 @@ define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -519,7 +519,7 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -606,7 +606,7 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1208,7 +1208,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1328,7 +1328,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1421,7 +1421,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index e4f2f21cf14d96..efde5b0b3a923f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -373,7 +373,7 @@ define <vscale x 32 x bfloat> @vfdiv_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -481,7 +481,7 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -568,7 +568,7 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1120,7 +1120,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1240,7 +1240,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1333,7 +1333,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
index a04904141ba458..e803a8416fba8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
@@ -43,7 +43,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1(
define iXLen @intrinsic_vfirst_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -98,7 +98,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1(
define iXLen @intrinsic_vfirst_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -139,7 +139,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1(
define iXLen @intrinsic_vfirst_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -180,7 +180,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1(
define iXLen @intrinsic_vfirst_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -221,7 +221,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1(
define iXLen @intrinsic_vfirst_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -262,7 +262,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1(
define iXLen @intrinsic_vfirst_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -303,7 +303,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1(
define iXLen @intrinsic_vfirst_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index b134293f2f6c10..b1652005285a06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -628,7 +628,7 @@ define <vscale x 32 x bfloat> @vfma_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
; CHECK-NEXT: add a2, a2, a3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vl8re16.v v0, (a0)
; CHECK-NEXT: csrr a2, vlenb
@@ -2194,7 +2194,7 @@ define <vscale x 32 x half> @vfma_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v24, v0
; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -3664,7 +3664,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -7799,7 +7799,7 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vfnmadd_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -8156,7 +8156,7 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_commute(<vscale x 16
;
; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_neg_splat_commute:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -8258,7 +8258,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -8559,7 +8559,7 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat(<vscale x 16 x half>
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -8719,7 +8719,7 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
; ZVFHMIN-NEXT: lui a2, 8
@@ -9285,7 +9285,7 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a1, 8
@@ -10010,7 +10010,7 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
@@ -11164,7 +11164,7 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
@@ -11777,7 +11777,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
@@ -11936,7 +11936,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a3, 8
@@ -12088,7 +12088,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a3, 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index f6ca80716ad7cf..949614ef2b3b2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -227,7 +227,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v0, v16
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -315,7 +315,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -666,7 +666,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -760,7 +760,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 35b23f0af8bd57..0374f0c9dbe327 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -226,7 +226,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFH-NEXT: slli a1, a1, 5
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v0, v16
; ZVFH-NEXT: addi a1, sp, 16
; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -317,7 +317,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -404,7 +404,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFH-NEXT: slli a0, a0, 5
; ZVFH-NEXT: sub sp, sp, a0
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v0, v16
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
@@ -501,7 +501,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
@@ -879,7 +879,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -972,7 +972,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 12a4c7d82af10a..6ac178ef5bfa39 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -183,7 +183,7 @@ define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -517,7 +517,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index ec1e2e42eb0cdb..303c6a2ad173cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -183,7 +183,7 @@ define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -517,7 +517,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index c5665554f0d167..98fbc041fd4a57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -495,7 +495,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -615,7 +615,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -708,7 +708,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 17ce2a00de04a4..610161b33365ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1112,7 +1112,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index 5e9fec14b759e8..c70dfaa8de38b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -450,7 +450,7 @@ declare <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index fdfdf06a340769..72bec44cc06483 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -329,7 +329,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: li a2, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index 3c31c8414a6712..63cbfb32a2aa14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -310,7 +310,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 4
@@ -390,7 +390,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
index 32a9b47e99ec1e..a90fe931a190d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -96,7 +96,7 @@ declare <vscale x 32 x float> @llvm.vp.fpext.nxv32f32.nxv32f16(<vscale x 32 x ha
define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_nxv32f16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index 3a855326692d7a..c7ae8c50b46dd9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -539,7 +539,7 @@ declare <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index 96ab124a144514..125f18094b3476 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -539,7 +539,7 @@ declare <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 1d56c5db968fad..2bd2923339e34b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -102,7 +102,7 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -148,7 +148,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index 3e4de295add01b..0036839f7434a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -167,7 +167,7 @@ declare <vscale x 32 x bfloat> @llvm.vp.sqrt.nxv32bf16(<vscale x 32 x bfloat>, <
define <vscale x 32 x bfloat> @vfsqrt_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a1, a2, 1
@@ -453,7 +453,7 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
@@ -751,7 +751,7 @@ declare <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfsqrt_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index a9ee0983f1a331..e4ac9130c46357 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -373,7 +373,7 @@ define <vscale x 32 x bfloat> @vfsub_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -481,7 +481,7 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -568,7 +568,7 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1120,7 +1120,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1240,7 +1240,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1333,7 +1333,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
index 41ec373ab09d5a..c00ac63ace8b52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
@@ -111,7 +111,7 @@ define <vscale x 4 x i32> @different_vl_with_ta(<vscale x 4 x i32> %a, <vscale x
define <vscale x 4 x i32> @different_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_vl_with_tu:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
@@ -127,7 +127,7 @@ define <vscale x 4 x i32> @different_vl_with_tu(<vscale x 4 x i32> %passthru, <v
define <vscale x 4 x i32> @different_imm_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_imm_vl_with_tu:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v10
; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
index a4aae25f93c1d2..389d1f3ed81e47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
@@ -51,7 +51,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
index 5d9ba18deb5024..889d7600e0d220 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
@@ -25,7 +25,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -65,7 +65,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -105,7 +105,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -145,7 +145,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -185,7 +185,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -225,7 +225,7 @@ entry:
define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 32 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -265,7 +265,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -306,7 +306,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -347,7 +347,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -388,7 +388,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -429,7 +429,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -470,7 +470,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -512,7 +512,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -554,7 +554,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -596,7 +596,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -638,7 +638,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -680,7 +680,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -723,7 +723,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -766,7 +766,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -809,7 +809,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -852,7 +852,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -896,7 +896,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -940,7 +940,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -984,7 +984,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1028,7 +1028,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1073,7 +1073,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1118,7 +1118,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1163,7 +1163,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1208,7 +1208,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1254,7 +1254,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1300,7 +1300,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1346,7 +1346,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1391,7 +1391,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1430,7 +1430,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1469,7 +1469,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1508,7 +1508,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1547,7 +1547,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1586,7 +1586,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1626,7 +1626,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1666,7 +1666,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1706,7 +1706,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1746,7 +1746,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1787,7 +1787,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1828,7 +1828,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1869,7 +1869,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1910,7 +1910,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1952,7 +1952,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1994,7 +1994,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2036,7 +2036,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2079,7 +2079,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2122,7 +2122,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2165,7 +2165,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2209,7 +2209,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2253,7 +2253,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2297,7 +2297,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2342,7 +2342,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2387,7 +2387,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2432,7 +2432,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -2471,7 +2471,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -2510,7 +2510,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -2549,7 +2549,7 @@ entry:
define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -2588,7 +2588,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2628,7 +2628,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2668,7 +2668,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2708,7 +2708,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2749,7 +2749,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2790,7 +2790,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2831,7 +2831,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2873,7 +2873,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2915,7 +2915,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2958,7 +2958,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3001,7 +3001,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3045,7 +3045,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3089,7 +3089,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3134,7 +3134,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3179,7 +3179,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -3218,7 +3218,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -3257,7 +3257,7 @@ entry:
define <vscale x 4 x i64> @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -3296,7 +3296,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3336,7 +3336,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3376,7 +3376,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3417,7 +3417,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3458,7 +3458,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3500,7 +3500,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3543,7 +3543,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3587,7 +3587,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3631,7 +3631,7 @@ entry:
define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -3669,7 +3669,7 @@ entry:
define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -3707,7 +3707,7 @@ entry:
define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -3745,7 +3745,7 @@ entry:
define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -3783,7 +3783,7 @@ entry:
define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -3821,7 +3821,7 @@ entry:
define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3860,7 +3860,7 @@ entry:
define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3899,7 +3899,7 @@ entry:
define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3938,7 +3938,7 @@ entry:
define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3977,7 +3977,7 @@ entry:
define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4017,7 +4017,7 @@ entry:
define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4057,7 +4057,7 @@ entry:
define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4097,7 +4097,7 @@ entry:
define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4137,7 +4137,7 @@ entry:
define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4178,7 +4178,7 @@ entry:
define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4219,7 +4219,7 @@ entry:
define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4260,7 +4260,7 @@ entry:
define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4302,7 +4302,7 @@ entry:
define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4344,7 +4344,7 @@ entry:
define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4386,7 +4386,7 @@ entry:
define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4429,7 +4429,7 @@ entry:
define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4472,7 +4472,7 @@ entry:
define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4515,7 +4515,7 @@ entry:
define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4559,7 +4559,7 @@ entry:
define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4603,7 +4603,7 @@ entry:
define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4647,7 +4647,7 @@ entry:
define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -4685,7 +4685,7 @@ entry:
define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -4723,7 +4723,7 @@ entry:
define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -4761,7 +4761,7 @@ entry:
define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -4799,7 +4799,7 @@ entry:
define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4838,7 +4838,7 @@ entry:
define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4877,7 +4877,7 @@ entry:
define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4916,7 +4916,7 @@ entry:
define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4956,7 +4956,7 @@ entry:
define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4996,7 +4996,7 @@ entry:
define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5036,7 +5036,7 @@ entry:
define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5077,7 +5077,7 @@ entry:
define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5118,7 +5118,7 @@ entry:
define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5160,7 +5160,7 @@ entry:
define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5202,7 +5202,7 @@ entry:
define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5245,7 +5245,7 @@ entry:
define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5288,7 +5288,7 @@ entry:
define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5332,7 +5332,7 @@ entry:
define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5376,7 +5376,7 @@ entry:
define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -5414,7 +5414,7 @@ entry:
define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -5452,7 +5452,7 @@ entry:
define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -5490,7 +5490,7 @@ entry:
define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5529,7 +5529,7 @@ entry:
define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5568,7 +5568,7 @@ entry:
define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5608,7 +5608,7 @@ entry:
define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5648,7 +5648,7 @@ entry:
define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5689,7 +5689,7 @@ entry:
define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5731,7 +5731,7 @@ entry:
define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5774,7 +5774,7 @@ entry:
define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5818,7 +5818,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -5856,7 +5856,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -5894,7 +5894,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -5932,7 +5932,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -5970,7 +5970,7 @@ entry:
define <vscale x 16 x bfloat> @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -6008,7 +6008,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6047,7 +6047,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6086,7 +6086,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6125,7 +6125,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6164,7 +6164,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6204,7 +6204,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6244,7 +6244,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6284,7 +6284,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6324,7 +6324,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6365,7 +6365,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6406,7 +6406,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6447,7 +6447,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6489,7 +6489,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6531,7 +6531,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6573,7 +6573,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6616,7 +6616,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6659,7 +6659,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6702,7 +6702,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6746,7 +6746,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6790,7 +6790,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
index c4c0a2ee101fd0..b87602e592f2be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
@@ -51,7 +51,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
index 1f4c0e3ace3917..ec322c04d1ca06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
@@ -25,7 +25,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -65,7 +65,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -105,7 +105,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -145,7 +145,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -185,7 +185,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -225,7 +225,7 @@ entry:
define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 32 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -265,7 +265,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -306,7 +306,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -347,7 +347,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -388,7 +388,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -429,7 +429,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -470,7 +470,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -512,7 +512,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -554,7 +554,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -596,7 +596,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -638,7 +638,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -680,7 +680,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -723,7 +723,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -766,7 +766,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -809,7 +809,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -852,7 +852,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -896,7 +896,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -940,7 +940,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -984,7 +984,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1028,7 +1028,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1073,7 +1073,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1118,7 +1118,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1163,7 +1163,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1208,7 +1208,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1254,7 +1254,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1300,7 +1300,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1346,7 +1346,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1391,7 +1391,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1430,7 +1430,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1469,7 +1469,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1508,7 +1508,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1547,7 +1547,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1586,7 +1586,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1626,7 +1626,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1666,7 +1666,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1706,7 +1706,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1746,7 +1746,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1787,7 +1787,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1828,7 +1828,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1869,7 +1869,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1910,7 +1910,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1952,7 +1952,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1994,7 +1994,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2036,7 +2036,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2079,7 +2079,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2122,7 +2122,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2165,7 +2165,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2209,7 +2209,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2253,7 +2253,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2297,7 +2297,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2342,7 +2342,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2387,7 +2387,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2432,7 +2432,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -2471,7 +2471,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -2510,7 +2510,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -2549,7 +2549,7 @@ entry:
define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -2588,7 +2588,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2628,7 +2628,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2668,7 +2668,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2708,7 +2708,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2749,7 +2749,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2790,7 +2790,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2831,7 +2831,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2873,7 +2873,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2915,7 +2915,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2958,7 +2958,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3001,7 +3001,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3045,7 +3045,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3089,7 +3089,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3134,7 +3134,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3179,7 +3179,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -3218,7 +3218,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -3257,7 +3257,7 @@ entry:
define <vscale x 4 x i64> @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -3296,7 +3296,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3336,7 +3336,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3376,7 +3376,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3417,7 +3417,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3458,7 +3458,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3500,7 +3500,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3543,7 +3543,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3587,7 +3587,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3631,7 +3631,7 @@ entry:
define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -3669,7 +3669,7 @@ entry:
define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -3707,7 +3707,7 @@ entry:
define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -3745,7 +3745,7 @@ entry:
define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -3783,7 +3783,7 @@ entry:
define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -3821,7 +3821,7 @@ entry:
define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3860,7 +3860,7 @@ entry:
define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3899,7 +3899,7 @@ entry:
define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3938,7 +3938,7 @@ entry:
define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3977,7 +3977,7 @@ entry:
define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4017,7 +4017,7 @@ entry:
define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4057,7 +4057,7 @@ entry:
define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4097,7 +4097,7 @@ entry:
define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4137,7 +4137,7 @@ entry:
define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4178,7 +4178,7 @@ entry:
define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4219,7 +4219,7 @@ entry:
define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4260,7 +4260,7 @@ entry:
define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4302,7 +4302,7 @@ entry:
define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4344,7 +4344,7 @@ entry:
define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4386,7 +4386,7 @@ entry:
define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4429,7 +4429,7 @@ entry:
define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4472,7 +4472,7 @@ entry:
define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4515,7 +4515,7 @@ entry:
define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4559,7 +4559,7 @@ entry:
define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4603,7 +4603,7 @@ entry:
define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4647,7 +4647,7 @@ entry:
define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -4685,7 +4685,7 @@ entry:
define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -4723,7 +4723,7 @@ entry:
define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -4761,7 +4761,7 @@ entry:
define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -4799,7 +4799,7 @@ entry:
define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4838,7 +4838,7 @@ entry:
define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4877,7 +4877,7 @@ entry:
define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4916,7 +4916,7 @@ entry:
define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4956,7 +4956,7 @@ entry:
define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4996,7 +4996,7 @@ entry:
define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5036,7 +5036,7 @@ entry:
define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5077,7 +5077,7 @@ entry:
define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5118,7 +5118,7 @@ entry:
define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5160,7 +5160,7 @@ entry:
define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5202,7 +5202,7 @@ entry:
define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5245,7 +5245,7 @@ entry:
define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5288,7 +5288,7 @@ entry:
define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5332,7 +5332,7 @@ entry:
define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5376,7 +5376,7 @@ entry:
define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -5414,7 +5414,7 @@ entry:
define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -5452,7 +5452,7 @@ entry:
define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -5490,7 +5490,7 @@ entry:
define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5529,7 +5529,7 @@ entry:
define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5568,7 +5568,7 @@ entry:
define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5608,7 +5608,7 @@ entry:
define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5648,7 +5648,7 @@ entry:
define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5689,7 +5689,7 @@ entry:
define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5731,7 +5731,7 @@ entry:
define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5774,7 +5774,7 @@ entry:
define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5818,7 +5818,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -5856,7 +5856,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -5894,7 +5894,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -5932,7 +5932,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -5970,7 +5970,7 @@ entry:
define <vscale x 16 x bfloat> @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -6008,7 +6008,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6047,7 +6047,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6086,7 +6086,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6125,7 +6125,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6164,7 +6164,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6204,7 +6204,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6244,7 +6244,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6284,7 +6284,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6324,7 +6324,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6365,7 +6365,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6406,7 +6406,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6447,7 +6447,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6489,7 +6489,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6531,7 +6531,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6573,7 +6573,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6616,7 +6616,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6659,7 +6659,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6702,7 +6702,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6746,7 +6746,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6790,7 +6790,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index bba24dd3b09515..a9a6521aaa9df8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -412,7 +412,7 @@ declare <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -975,7 +975,7 @@ declare <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1036,7 +1036,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index 61b416f9f1b429..37f42db58ef3cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -410,7 +410,7 @@ declare <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -973,7 +973,7 @@ declare <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1034,7 +1034,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
index 19e7cdaff0ffec..b82fe5a19ea7e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
index 70dff36191e9e8..3240190bd2b381 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
index 19d0c1e6e0c822..21e935a0cb4314 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
index 9c5bfdc969f2a5..3937cd7b67025a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
index c7eb5c20b72c9a..26c5eedc6aafd0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
index 23b5fb53b085b5..827c7b436af8fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 3c7b03469e1165..0ac38775fbe0b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -412,7 +412,7 @@ declare <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -975,7 +975,7 @@ declare <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1036,7 +1036,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 4da5a7e476870d..06cbc88dc200b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -410,7 +410,7 @@ declare <vscale x 128 x i8> @llvm.vp.umin.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -973,7 +973,7 @@ declare <vscale x 32 x i32> @llvm.vp.umin.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vminu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1034,7 +1034,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
index 0be156aad1cb28..92d1668967e5ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -74,7 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -117,7 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -203,7 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -246,7 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -289,7 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
index 868273c1ea1b32..4ae487fcf36c53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
index f3071bf7a2302a..fa6273b6fd012b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -971,7 +971,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1020,7 +1020,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1069,7 +1069,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1090,7 +1090,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8_1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: li a1, 99
; CHECK-NEXT: vmv1r.v v0, v9
@@ -1158,7 +1158,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1207,7 +1207,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1256,7 +1256,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1305,7 +1305,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1403,7 +1403,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1452,7 +1452,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1501,7 +1501,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1550,7 +1550,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1599,7 +1599,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1648,7 +1648,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1697,7 +1697,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1773,7 +1773,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1849,7 +1849,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1925,7 +1925,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1961,7 +1961,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1997,7 +1997,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -2064,7 +2064,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2100,7 +2100,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2136,7 +2136,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2172,7 +2172,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2208,7 +2208,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2244,7 +2244,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2280,7 +2280,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2316,7 +2316,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2352,7 +2352,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2388,7 +2388,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2424,7 +2424,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2460,7 +2460,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2496,7 +2496,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2532,7 +2532,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2568,7 +2568,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2604,7 +2604,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index d09c85c43647fb..6cfd0ed7318e7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -971,7 +971,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1020,7 +1020,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1069,7 +1069,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1118,7 +1118,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1167,7 +1167,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1216,7 +1216,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1265,7 +1265,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1314,7 +1314,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1363,7 +1363,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1412,7 +1412,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1461,7 +1461,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1510,7 +1510,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1559,7 +1559,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1608,7 +1608,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1657,7 +1657,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1733,7 +1733,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1809,7 +1809,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1921,7 +1921,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1957,7 +1957,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1993,7 +1993,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2014,7 +2014,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: li a1, 99
; CHECK-NEXT: vmv1r.v v0, v9
@@ -2051,7 +2051,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2087,7 +2087,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2123,7 +2123,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2159,7 +2159,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2258,7 +2258,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2294,7 +2294,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2330,7 +2330,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2366,7 +2366,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2402,7 +2402,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2438,7 +2438,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2474,7 +2474,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2510,7 +2510,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2546,7 +2546,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2582,7 +2582,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
index dd0e73d9bc1acb..668de9b965d6dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
index f9e4b6aab11b7d..bece4c9c53f783 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
index 065aeafd3f97b8..ad05e4ec993b2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -74,7 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -117,7 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -203,7 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -246,7 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -289,7 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
index c12cf515d32e9f..ca78acc94560d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
index a516834876a668..5d17abb1044e1f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
index afef755ddec915..6bb8710d42f7b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
index beaf809a83ab10..ae981d700a84a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
index dc3a50ad7bd687..54fee4a68aca2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
index 38925934db2f28..e4b9c39fb85083 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -74,7 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -117,7 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -203,7 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -246,7 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -289,7 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 33e88360e271c7..588dd80923f8ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -49,7 +49,7 @@ define <vscale x 4 x i32> @vadd_same_passthru(<vscale x 4 x i32> %passthru, <vsc
define <vscale x 4 x i32> @unfoldable_diff_avl_unknown(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: unfoldable_diff_avl_unknown:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
index 47d9f3fde7cd06..219269dc109b46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
@@ -5,7 +5,7 @@
define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v0
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -18,7 +18,7 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
;
; RV64-LABEL: bool_vec:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
@@ -37,7 +37,7 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec_zero_poison:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v0
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -46,7 +46,7 @@ define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m,
;
; RV64-LABEL: bool_vec_zero_poison:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
index 912abd6dc36dcc..421afd746cfc07 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
@@ -12,7 +12,7 @@ define <vscale x 1 x i64> @all_ones(<vscale x 1 x i64> %true, <vscale x 1 x i64>
define <vscale x 1 x i64> @all_zeroes(<vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl) {
; CHECK-LABEL: all_zeroes:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> splat (i1 false), <vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index 6839bb647b5358..50170a009e5640 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -10,7 +10,7 @@ declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 x i1>, i32,
define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -35,7 +35,7 @@ define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %ev
define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -60,7 +60,7 @@ define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb,
define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -86,7 +86,7 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1>
define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -111,7 +111,7 @@ define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %ev
define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -136,7 +136,7 @@ define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb,
define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -162,7 +162,7 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1>
define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -187,7 +187,7 @@ define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %ev
define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -212,7 +212,7 @@ define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb,
define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -238,7 +238,7 @@ define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1>
define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -263,7 +263,7 @@ define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext
define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -288,7 +288,7 @@ define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1>
define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 9a8c90efe526ef..178de322d7e036 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -13,7 +13,7 @@ declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i
define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -38,7 +38,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -63,7 +63,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -89,7 +89,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -114,7 +114,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -139,7 +139,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -165,7 +165,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -190,7 +190,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -215,7 +215,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -241,7 +241,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -266,7 +266,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -291,7 +291,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -317,7 +317,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -342,7 +342,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -367,7 +367,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -394,7 +394,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -419,7 +419,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -444,7 +444,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -471,7 +471,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -496,7 +496,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -521,7 +521,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index f8294155e82ad3..2452b1e8d5f8a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.vp.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr>,
define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8> %idxs, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv32i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a2, a3, 1
@@ -286,7 +286,7 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
;
; RV64-LABEL: vpgather_baseidx_nxv32i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: slli a3, a2, 1
@@ -2459,7 +2459,7 @@ declare <vscale x 16 x double> @llvm.vp.gather.nxv16f64.nxv16p0(<vscale x 16 x p
define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: sub a2, a0, a1
@@ -2483,7 +2483,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
;
; RV64-LABEL: vpgather_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: sub a2, a0, a1
@@ -2510,7 +2510,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v16, v8
@@ -2536,7 +2536,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
;
; RV64-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
@@ -2570,7 +2570,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v16, v8
@@ -2596,7 +2596,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
;
; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
@@ -2631,7 +2631,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vzext.vf2 v16, v8
@@ -2657,7 +2657,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
;
; RV64-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index 1dbc115a155aa2..d26b6dc9210250 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -522,7 +522,7 @@ declare <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr, <vscale x 16 x i1>
define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: sub a3, a1, a2
@@ -562,7 +562,7 @@ declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x doub
define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv17f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a5, a3, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index ab539a03ea18c2..8c4d2e077f31cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -361,7 +361,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: addi a1, sp, 16
@@ -402,7 +402,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -431,7 +431,7 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index c3c259e10172d7..bbd0f8cbd8b5a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -468,7 +468,7 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: slli a3, a3, 3
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
@@ -484,7 +484,7 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a6, a3
; CHECK-NEXT: .LBB36_4:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 31766803364861..12db6facd35b66 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -23,7 +23,7 @@ declare i1 @llvm.vp.reduce.or.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i
define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -40,7 +40,7 @@ declare i1 @llvm.vp.reduce.xor.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -73,7 +73,7 @@ declare i1 @llvm.vp.reduce.or.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i
define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -90,7 +90,7 @@ declare i1 @llvm.vp.reduce.xor.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -123,7 +123,7 @@ declare i1 @llvm.vp.reduce.or.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i
define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -140,7 +140,7 @@ declare i1 @llvm.vp.reduce.xor.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -173,7 +173,7 @@ declare i1 @llvm.vp.reduce.or.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i
define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -190,7 +190,7 @@ declare i1 @llvm.vp.reduce.xor.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -223,7 +223,7 @@ declare i1 @llvm.vp.reduce.or.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>
define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -240,7 +240,7 @@ declare i1 @llvm.vp.reduce.xor.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1
define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -273,7 +273,7 @@ declare i1 @llvm.vp.reduce.or.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>
define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -290,7 +290,7 @@ declare i1 @llvm.vp.reduce.xor.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1
define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -307,7 +307,7 @@ declare i1 @llvm.vp.reduce.or.nxv40i1(i1, <vscale x 40 x i1>, <vscale x 40 x i1>
define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, <vscale x 40 x i1> %v, <vscale x 40 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv40i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -340,7 +340,7 @@ declare i1 @llvm.vp.reduce.or.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>
define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -357,7 +357,7 @@ declare i1 @llvm.vp.reduce.xor.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1
define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -374,7 +374,7 @@ declare i1 @llvm.vp.reduce.or.nxv128i1(i1, <vscale x 128 x i1>, <vscale x 128 x
define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, <vscale x 128 x i1> %v, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv128i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
@@ -406,7 +406,7 @@ declare i1 @llvm.vp.reduce.add.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -423,7 +423,7 @@ declare i1 @llvm.vp.reduce.add.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -440,7 +440,7 @@ declare i1 @llvm.vp.reduce.add.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -457,7 +457,7 @@ declare i1 @llvm.vp.reduce.add.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -474,7 +474,7 @@ declare i1 @llvm.vp.reduce.add.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1
define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -491,7 +491,7 @@ declare i1 @llvm.vp.reduce.add.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1
define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -508,7 +508,7 @@ declare i1 @llvm.vp.reduce.add.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1
define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -638,7 +638,7 @@ declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -655,7 +655,7 @@ declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -672,7 +672,7 @@ declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -689,7 +689,7 @@ declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -706,7 +706,7 @@ declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i
define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -723,7 +723,7 @@ declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i
define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -740,7 +740,7 @@ declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i
define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -757,7 +757,7 @@ declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -774,7 +774,7 @@ declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -791,7 +791,7 @@ declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -808,7 +808,7 @@ declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -825,7 +825,7 @@ declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i
define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -842,7 +842,7 @@ declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i
define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -859,7 +859,7 @@ declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i
define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
index 9fc86aee775319..04e580127117ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -22,7 +22,7 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
; NOSUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
; NOSUBREG-NEXT: # =>This Inner Loop Header: Depth=1
; NOSUBREG-NEXT: vl1r.v v9, (zero)
-; NOSUBREG-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; NOSUBREG-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; NOSUBREG-NEXT: vmv1r.v v13, v12
; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; NOSUBREG-NEXT: vrgatherei16.vv v13, v9, v10
@@ -43,7 +43,7 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
; SUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
; SUBREG-NEXT: # =>This Inner Loop Header: Depth=1
; SUBREG-NEXT: vl1r.v v9, (zero)
-; SUBREG-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; SUBREG-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; SUBREG-NEXT: vmv1r.v v13, v12
; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; SUBREG-NEXT: vrgatherei16.vv v13, v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index cfe5aee6edb9ae..56d40fbba6807a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -572,7 +572,7 @@ declare <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1351,7 +1351,7 @@ declare <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index f45154222a9a78..63277f5be7aa8d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -571,7 +571,7 @@ declare <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1350,7 +1350,7 @@ declare <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
index d176ccf6fabdad..bbde5b015d4b7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
@@ -126,7 +126,7 @@ define <vscale x 8 x bfloat> @vmerge_truelhs_nxv8bf16_0(<vscale x 8 x bfloat> %v
define <vscale x 8 x bfloat> @vmerge_falselhs_nxv8bf16_0(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8bf16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 6b9c5fe7b72189..521e4be18f1dda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -175,7 +175,7 @@ define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <v
define <vscale x 8 x half> @vmerge_falselhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
index a0eab1c61709e9..51b735717017e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
@@ -803,7 +803,7 @@ define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vmerge_falselhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 6de6d56badbabd..38c4755e48b862 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -362,7 +362,7 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a4, a3, 3
@@ -422,7 +422,7 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a3, a1, 3
@@ -712,7 +712,7 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a3, a1, 3
@@ -838,7 +838,7 @@ define <vscale x 2 x i1> @select_cond_x_cond(<vscale x 2 x i1> %x, <vscale x 2 x
define <vscale x 2 x i1> @select_undef_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_undef_T_F:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> poison, <vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 %evl)
@@ -856,7 +856,7 @@ define <vscale x 2 x i1> @select_undef_undef_F(<vscale x 2 x i1> %x, i32 zeroext
define <vscale x 2 x i1> @select_unknown_undef_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_undef_F:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> undef, <vscale x 2 x i1> %y, i32 %evl)
@@ -866,7 +866,7 @@ define <vscale x 2 x i1> @select_unknown_undef_F(<vscale x 2 x i1> %x, <vscale x
define <vscale x 2 x i1> @select_unknown_T_undef(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_T_undef:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> poison, i32 %evl)
@@ -876,7 +876,7 @@ define <vscale x 2 x i1> @select_unknown_T_undef(<vscale x 2 x i1> %x, <vscale x
define <vscale x 2 x i1> @select_false_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> %z, i32 zeroext %evl) {
; CHECK-LABEL: select_false_T_F:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> %y, <vscale x 2 x i1> %z, i32 %evl)
@@ -886,7 +886,7 @@ define <vscale x 2 x i1> @select_false_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i
define <vscale x 2 x i1> @select_unknown_T_T(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_T_T:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> %y, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
index 70d3f306542548..c7446bbf2cbcde 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
@@ -18,7 +18,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind {
; CHECK-LABEL: fixed_length:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: # kill: def $v11 killed $v10
; CHECK-NEXT: # kill: def $v9 killed $v8
@@ -37,7 +37,7 @@ entry:
define <vscale x 1 x double> @scalable(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: # implicit-def: $v9
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
@@ -55,7 +55,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_vlmax(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_vlmax:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -84,7 +84,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_avl_imm(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_imm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetivli a0, 2, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -112,7 +112,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_avl_reg(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_reg:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -140,7 +140,7 @@ entry:
define <vscale x 1 x double> @intrinsic_diff_avl_reg(i64 %avl, i64 %avl2, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_diff_avl_reg:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index ae59358505a752..f25da055467925 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -377,7 +377,7 @@ entry:
define <vscale x 1 x double> @test19(<vscale x 1 x double> %a, double %b) nounwind {
; CHECK-LABEL: test19:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
index 7add176edf4bdd..378cdcfca07260 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -151,7 +151,7 @@ declare <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <
define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index f75c22ee9d07a9..776cd977661d56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -508,7 +508,7 @@ declare <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
@@ -533,7 +533,7 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
@@ -568,7 +568,7 @@ declare <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i
define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index b31cba065b0b98..5c22b46621a77f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -590,7 +590,7 @@ declare <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1393,7 +1393,7 @@ declare <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 388614739dd82e..49f72975c4c495 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -588,7 +588,7 @@ declare <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1391,7 +1391,7 @@ declare <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index 915d7c99d7c667..cc0390a9238eb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -157,7 +157,7 @@ declare <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64>
define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
@@ -215,7 +215,7 @@ declare <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32>,
define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -250,7 +250,7 @@ declare <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32>,
define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -291,7 +291,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index 5cee7f981c540d..e061f074db5bd5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -500,7 +500,7 @@ declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
@@ -525,7 +525,7 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
@@ -560,7 +560,7 @@ declare <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i
define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
index 6f74d752bbabec..cddc16a058eeaa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -151,7 +151,7 @@ declare <vscale x 32 x i32> @llvm.vp.zext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <
define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vzext_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
>From 9e4eab84a7d4c85e829b845b35b06458f7555bff Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Sun, 1 Dec 2024 22:33:39 -0800
Subject: [PATCH 7/7] EEW=32 -> EEW=8
---
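Note on this patch: the vtype immediate for the copy-guarding vsetivli now encodes SEW=8 instead of SEW=32 (LMUL_1 and tu/mu are unchanged), which is why the test output flips from "e32, m1, tu, mu" to "e8, m1, tu, mu". Below is a minimal standalone sketch of the two resulting vtype immediates; it assumes the vtype[2:0]=vlmul / vtype[5:3]=vsew / vtype[6]=vta / vtype[7]=vma layout from the RVV 1.0 spec (the same layout RISCVVType::encodeVTYPE produces), and encodeVType is a local illustrative helper, not an LLVM API.

// sketch.cpp - compare vtype immediates for e32,m1,tu,mu vs e8,m1,tu,mu
#include <cstdio>

static unsigned encodeVType(unsigned LMULBits, unsigned SEW, bool TA, bool MA) {
  // vsew field is log2(SEW) - 3: 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3
  unsigned SEWBits = __builtin_ctz(SEW) - 3;
  return LMULBits | (SEWBits << 3) | (unsigned(TA) << 6) | (unsigned(MA) << 7);
}

int main() {
  // LMUL_1 encodes as 0b000 in vtype[2:0]; tu/mu means vta = vma = 0.
  printf("e32, m1, tu, mu -> 0x%02x\n", encodeVType(0, 32, false, false)); // 0x10
  printf("e8,  m1, tu, mu -> 0x%02x\n", encodeVType(0, 8, false, false));  // 0x00
  return 0;
}

So the guard instruction emitted before a whole-register move becomes "vsetivli zero, 0, e8, m1, tu, mu" (immediate 0x00) rather than "vsetivli zero, 0, e32, m1, tu, mu" (immediate 0x10), matching the test updates in the diff below.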
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 2 +-
.../CodeGen/RISCV/inline-asm-v-constraint.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/abs-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll | 4 +-
.../CodeGen/RISCV/rvv/calling-conv-fastcc.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/calling-conv.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/compressstore.ll | 4 +-
.../RISCV/rvv/constant-folding-crash.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll | 10 +-
llvm/test/CodeGen/RISCV/rvv/expandload.ll | 972 +++++++++---------
.../CodeGen/RISCV/rvv/extract-subvector.ll | 38 +-
.../rvv/fixed-vector-i8-index-cornercase.ll | 4 +-
.../RISCV/rvv/fixed-vectors-bitreverse-vp.ll | 4 +-
.../rvv/fixed-vectors-calling-conv-fastcc.ll | 2 +-
.../RISCV/rvv/fixed-vectors-calling-conv.ll | 2 +-
.../RISCV/rvv/fixed-vectors-ceil-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-ctpop-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-floor-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-fmaximum-vp.ll | 34 +-
.../RISCV/rvv/fixed-vectors-fminimum-vp.ll | 34 +-
.../RISCV/rvv/fixed-vectors-fp-interleave.ll | 4 +-
.../RISCV/rvv/fixed-vectors-fptrunc-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll | 2 +-
.../rvv/fixed-vectors-insert-subvector.ll | 4 +-
.../RISCV/rvv/fixed-vectors-int-interleave.ll | 4 +-
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 98 +-
.../rvv/fixed-vectors-masked-load-int.ll | 2 +-
.../RISCV/rvv/fixed-vectors-nearbyint-vp.ll | 16 +-
.../rvv/fixed-vectors-reduction-mask-vp.ll | 62 +-
.../RISCV/rvv/fixed-vectors-rint-vp.ll | 16 +-
.../RISCV/rvv/fixed-vectors-round-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-roundeven-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-roundtozero-vp.ll | 20 +-
.../RISCV/rvv/fixed-vectors-setcc-int-vp.ll | 6 +-
.../RISCV/rvv/fixed-vectors-shuffle-concat.ll | 18 +-
.../rvv/fixed-vectors-shuffle-exact-vlen.ll | 4 +-
.../rvv/fixed-vectors-shuffle-reverse.ll | 22 +-
.../rvv/fixed-vectors-shuffle-vslide1up.ll | 2 +-
.../fixed-vectors-strided-load-store-asm.ll | 2 +-
.../RISCV/rvv/fixed-vectors-strided-vpload.ll | 6 +-
.../RISCV/rvv/fixed-vectors-trunc-vp.ll | 6 +-
.../RISCV/rvv/fixed-vectors-unaligned.ll | 8 +-
.../RISCV/rvv/fixed-vectors-vadd-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vmax-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vmaxu-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vmin-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vminu-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vpgather.ll | 2 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vpload.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vpmerge.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vsadd-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vsaddu-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vselect-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vssub-vp.ll | 2 +-
.../RISCV/rvv/fixed-vectors-vssubu-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/floor-vp.ll | 38 +-
.../test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll | 50 +-
.../test/CodeGen/RISCV/rvv/fminimum-sdnode.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll | 50 +-
.../RISCV/rvv/fold-scalar-load-crash.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll | 10 +-
llvm/test/CodeGen/RISCV/rvv/inline-asm.ll | 14 +-
.../CodeGen/RISCV/rvv/insert-subvector.ll | 44 +-
llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/masked-tama.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll | 32 +-
.../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll | 2 +-
.../RISCV/rvv/named-vector-shuffle-reverse.ll | 26 +-
llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/pr88576.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/rint-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/round-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll | 38 +-
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll | 38 +-
.../RISCV/rvv/rv32-spill-vector-csr.ll | 2 +-
.../RISCV/rvv/rv64-spill-vector-csr.ll | 2 +-
.../test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll | 2 +-
.../RISCV/rvv/rvv-peephole-vmerge-vops.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll | 12 +-
.../CodeGen/RISCV/rvv/sink-splat-operands.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll | 8 +-
.../test/CodeGen/RISCV/rvv/strided-vpstore.ll | 4 +-
.../RISCV/rvv/undef-earlyclobber-chain.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vcpop.ll | 14 +-
.../RISCV/rvv/vector-deinterleave-fixed.ll | 2 +-
.../CodeGen/RISCV/rvv/vector-deinterleave.ll | 16 +-
.../RISCV/rvv/vector-interleave-fixed.ll | 8 +-
.../RISCV/rvv/vector-interleave-store.ll | 2 +-
.../CodeGen/RISCV/rvv/vector-interleave.ll | 30 +-
.../RISCV/rvv/vector-reassociations.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vector-splice.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vfirst.ll | 14 +-
llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll | 28 +-
.../RISCV/rvv/vfmadd-constrained-sdnode.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll | 2 +-
.../RISCV/rvv/vfnmadd-constrained-sdnode.ll | 2 +-
.../RISCV/rvv/vfnmsub-constrained-sdnode.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vl-opt.ll | 4 +-
.../CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll | 330 +++---
.../CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll | 330 +++---
llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vmfeq.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfge.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfgt.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfle.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmflt.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmfne.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vmsbf.ll | 14 +-
llvm/test/CodeGen/RISCV/rvv/vmseq.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsge.ll | 74 +-
llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsgt.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsif.ll | 14 +-
llvm/test/CodeGen/RISCV/rvv/vmsle.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsleu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmslt.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsltu.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsne.ll | 72 +-
llvm/test/CodeGen/RISCV/rvv/vmsof.ll | 14 +-
.../CodeGen/RISCV/rvv/vmv.v.v-peephole.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vp-select.ll | 2 +-
.../RISCV/rvv/vp-splice-mask-fixed-vectors.ll | 24 +-
.../RISCV/rvv/vp-splice-mask-vectors.ll | 42 +-
.../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vpload.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vpstore.ll | 4 +-
.../CodeGen/RISCV/rvv/vreductions-mask-vp.ll | 74 +-
.../RISCV/rvv/vrgatherei16-subreg-liveness.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vselect-int.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll | 16 +-
.../CodeGen/RISCV/rvv/vsetvli-insert-O0.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll | 4 +-
llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll | 2 +-
173 files changed, 2128 insertions(+), 2128 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index c05e84e48fd169..0b0a15780c2be5 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1809,7 +1809,7 @@ void RISCVInsertVSETVLI::insertVSETIVLIBeforeCopy(MachineBasicBlock &MBB) {
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(RISCV::PseudoVSETIVLI))
.addReg(RISCV::X0, RegState::Define | RegState::Dead)
.addImm(0)
- .addImm(RISCVVType::encodeVTYPE(RISCVII::VLMUL::LMUL_1, 32, false,
+ .addImm(RISCVVType::encodeVTYPE(RISCVII::VLMUL::LMUL_1, 8, false,
false));
if (LIS)
LIS->InsertMachineInstrInMaps(*VSETVL0MI);
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
index 77ffdd9ae934a6..a2286af7ae0b92 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-v-constraint.ll
@@ -45,7 +45,7 @@ define <vscale x 1 x i8> @constraint_vd(<vscale x 1 x i8> %0, <vscale x 1 x i8>
define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1) nounwind {
; RV32I-LABEL: constraint_vm:
; RV32I: # %bb.0:
-; RV32I-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32I-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32I-NEXT: vmv1r.v v9, v0
; RV32I-NEXT: vmv1r.v v0, v8
; RV32I-NEXT: #APP
@@ -55,7 +55,7 @@ define <vscale x 1 x i1> @constraint_vm(<vscale x 1 x i1> %0, <vscale x 1 x i1>
;
; RV64I-LABEL: constraint_vm:
; RV64I: # %bb.0:
-; RV64I-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64I-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64I-NEXT: vmv1r.v v9, v0
; RV64I-NEXT: vmv1r.v v0, v8
; RV64I-NEXT: #APP
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index d91659d9e1c7c4..0a0e72c05f6769 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -567,7 +567,7 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 2836459ec5e0a3..3842944f28d135 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -3075,7 +3075,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -3159,7 +3159,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 113ae37b08ae65..971ea27f6b2659 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -1584,7 +1584,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1632,7 +1632,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
;
; CHECK-ZVKB-LABEL: vp_bswap_nxv64i16:
; CHECK-ZVKB: # %bb.0:
-; CHECK-ZVKB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-ZVKB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-ZVKB-NEXT: vmv1r.v v24, v0
; CHECK-ZVKB-NEXT: csrr a1, vlenb
; CHECK-ZVKB-NEXT: srli a2, a1, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 5c533f042b4e53..96f130848eed7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -336,7 +336,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: li a3, 2
; RV32-NEXT: vs8r.v v16, (a1)
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv8r.v v8, v0
; RV32-NEXT: vmv8r.v v16, v24
; RV32-NEXT: call ext2
@@ -375,7 +375,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV64-NEXT: add a1, a3, a1
; RV64-NEXT: li a3, 2
; RV64-NEXT: vs8r.v v16, (a1)
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv8r.v v8, v0
; RV64-NEXT: vmv8r.v v16, v24
; RV64-NEXT: call ext2
@@ -453,7 +453,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 128
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv8r.v v16, v0
; RV32-NEXT: call ext3
; RV32-NEXT: addi sp, s0, -144
@@ -526,7 +526,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 128
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv8r.v v16, v0
; RV64-NEXT: call ext3
; RV64-NEXT: addi sp, s0, -144
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 068fdad8a4ab3e..791a5a812f7877 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -103,7 +103,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call callee_tuple_return
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
@@ -120,7 +120,7 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return(
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call callee_tuple_return
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
@@ -146,7 +146,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: vmv2r.v v10, v6
@@ -163,7 +163,7 @@ define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: vmv2r.v v10, v6
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index e0522b7eb1e67a..1d3c6c304661b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.ceil.nxv4bf16(<vscale x 4 x bfloat>, <vsc
define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.ceil.nxv8bf16(<vscale x 8 x bfloat>, <vsc
define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.ceil.nxv16bf16(<vscale x 16 x bfloat>, <
define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va,
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.ceil.nxv4f32(<vscale x 4 x float>, <vscale
define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.ceil.nxv8f32(<vscale x 8 x float>, <vscale
define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.ceil.nxv16f32(<vscale x 16 x float>, <vsc
define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index 20b12df953a18c..a8922fb4da1b84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -197,7 +197,7 @@ entry:
define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
; RV64-LABEL: test_compresstore_v256i8:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v7, v8
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -231,7 +231,7 @@ define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: sub sp, sp, a2
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v16
; RV32-NEXT: li a2, 128
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
index 58236dc1439273..7632e7a68996fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -18,7 +18,7 @@
define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lanes.b, <4 x i1> %sel) {
; RV32-LABEL: constant_folding_crash:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: lw a0, 8(a0)
; RV32-NEXT: andi a0, a0, 1
@@ -44,7 +44,7 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
;
; RV64-LABEL: constant_folding_crash:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: ld a0, 8(a0)
; RV64-NEXT: andi a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index 4c818a515f96e9..2589e83d628580 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -1235,7 +1235,7 @@ declare <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64>, i1 immar
define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctlz_nxv16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: fsrmi a4, 1
@@ -1271,7 +1271,7 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_ctlz_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -2467,7 +2467,7 @@ define <vscale x 8 x i64> @vp_ctlz_zero_undef_nxv8i64_unmasked(<vscale x 8 x i64
define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: fsrmi a3, 1
@@ -2500,7 +2500,7 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
;
; CHECK-ZVBB-LABEL: vp_ctlz_zero_undef_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index efe1b4293145fa..dea300dc04d5ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2022,7 +2022,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 24
@@ -2295,7 +2295,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 96d3e446387fe7..fe0e8f873cbbb4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2246,7 +2246,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 5
@@ -2500,7 +2500,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: sub sp, sp, a1
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
@@ -2588,7 +2588,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
;
; CHECK-ZVBB-LABEL: vp_cttz_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
@@ -4005,7 +4005,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -4061,7 +4061,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
;
; CHECK-ZVBB-LABEL: vp_cttz_zero_undef_nxv16i64:
; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
; CHECK-ZVBB-NEXT: csrr a1, vlenb
; CHECK-ZVBB-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index 11cf9fe86a9488..2350164c4ae98d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -227,7 +227,7 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8
; CHECK-RV32-NEXT: add a2, sp, a2
; CHECK-RV32-NEXT: addi a2, a2, 16
; CHECK-RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v7, v8
; CHECK-RV32-NEXT: li a2, 128
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -339,7 +339,7 @@ define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8
; CHECK-RV64-NEXT: add a2, sp, a2
; CHECK-RV64-NEXT: addi a2, a2, 16
; CHECK-RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v7, v8
; CHECK-RV64-NEXT: li a2, 128
; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -1628,7 +1628,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a1, .LBB61_30
; CHECK-RV32-NEXT: .LBB61_29: # %cond.load109
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 29, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -1642,7 +1642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: bgez a2, .LBB61_32
; CHECK-RV32-NEXT: # %bb.31: # %cond.load113
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 30, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a2
@@ -1791,7 +1791,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_65: # %cond.load241
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -1945,7 +1945,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_99: # %cond.load369
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -2099,7 +2099,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_133: # %cond.load497
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -2253,7 +2253,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_167: # %cond.load625
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -2407,7 +2407,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_201: # %cond.load753
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -2561,7 +2561,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_235: # %cond.load881
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -2715,7 +2715,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_269: # %cond.load1009
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -3931,7 +3931,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_3
; CHECK-RV32-NEXT: .LBB61_546: # %cond.load5
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3944,7 +3944,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_4
; CHECK-RV32-NEXT: .LBB61_547: # %cond.load9
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3957,7 +3957,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_5
; CHECK-RV32-NEXT: .LBB61_548: # %cond.load13
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 5, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3970,7 +3970,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_6
; CHECK-RV32-NEXT: .LBB61_549: # %cond.load17
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3983,7 +3983,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_7
; CHECK-RV32-NEXT: .LBB61_550: # %cond.load21
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -3996,7 +3996,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_8
; CHECK-RV32-NEXT: .LBB61_551: # %cond.load25
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4009,7 +4009,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_9
; CHECK-RV32-NEXT: .LBB61_552: # %cond.load29
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 9, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4022,7 +4022,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_10
; CHECK-RV32-NEXT: .LBB61_553: # %cond.load33
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4035,7 +4035,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_11
; CHECK-RV32-NEXT: .LBB61_554: # %cond.load37
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4048,7 +4048,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_12
; CHECK-RV32-NEXT: .LBB61_555: # %cond.load41
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4061,7 +4061,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_13
; CHECK-RV32-NEXT: .LBB61_556: # %cond.load45
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 13, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4074,7 +4074,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_14
; CHECK-RV32-NEXT: .LBB61_557: # %cond.load49
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4087,7 +4087,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_15
; CHECK-RV32-NEXT: .LBB61_558: # %cond.load53
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4100,7 +4100,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_16
; CHECK-RV32-NEXT: .LBB61_559: # %cond.load57
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4113,7 +4113,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_17
; CHECK-RV32-NEXT: .LBB61_560: # %cond.load61
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 17, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4126,7 +4126,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_18
; CHECK-RV32-NEXT: .LBB61_561: # %cond.load65
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 18, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4139,7 +4139,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_19
; CHECK-RV32-NEXT: .LBB61_562: # %cond.load69
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 19, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4152,7 +4152,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_20
; CHECK-RV32-NEXT: .LBB61_563: # %cond.load73
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 20, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4165,7 +4165,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_21
; CHECK-RV32-NEXT: .LBB61_564: # %cond.load77
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 21, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4178,7 +4178,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_22
; CHECK-RV32-NEXT: .LBB61_565: # %cond.load81
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 22, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4191,7 +4191,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_23
; CHECK-RV32-NEXT: .LBB61_566: # %cond.load85
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 23, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4204,7 +4204,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_24
; CHECK-RV32-NEXT: .LBB61_567: # %cond.load89
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 24, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4217,7 +4217,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_25
; CHECK-RV32-NEXT: .LBB61_568: # %cond.load93
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 25, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4230,7 +4230,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_26
; CHECK-RV32-NEXT: .LBB61_569: # %cond.load97
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 26, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4243,7 +4243,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_27
; CHECK-RV32-NEXT: .LBB61_570: # %cond.load101
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 27, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4256,7 +4256,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: j .LBB61_28
; CHECK-RV32-NEXT: .LBB61_571: # %cond.load105
; CHECK-RV32-NEXT: lbu a1, 0(a0)
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetivli zero, 28, e8, m1, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a1
@@ -4285,7 +4285,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_573: # %cond.load125
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4302,7 +4302,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_574: # %cond.load129
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4319,7 +4319,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_575: # %cond.load133
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4336,7 +4336,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_576: # %cond.load137
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4353,7 +4353,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_577: # %cond.load141
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4370,7 +4370,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_578: # %cond.load145
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4387,7 +4387,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_579: # %cond.load149
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4404,7 +4404,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_580: # %cond.load153
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4421,7 +4421,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_581: # %cond.load157
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4438,7 +4438,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_582: # %cond.load161
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4455,7 +4455,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_583: # %cond.load165
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4472,7 +4472,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_584: # %cond.load169
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4489,7 +4489,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_585: # %cond.load173
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4506,7 +4506,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_586: # %cond.load177
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4523,7 +4523,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_587: # %cond.load181
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4540,7 +4540,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_588: # %cond.load185
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4557,7 +4557,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_589: # %cond.load189
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4574,7 +4574,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_590: # %cond.load193
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4591,7 +4591,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_591: # %cond.load197
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4608,7 +4608,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_592: # %cond.load201
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4625,7 +4625,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_593: # %cond.load205
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4642,7 +4642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_594: # %cond.load209
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4659,7 +4659,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_595: # %cond.load213
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4676,7 +4676,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_596: # %cond.load217
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4693,7 +4693,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_597: # %cond.load221
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4710,7 +4710,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_598: # %cond.load225
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4727,7 +4727,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_599: # %cond.load229
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4744,7 +4744,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_600: # %cond.load233
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4761,7 +4761,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_601: # %cond.load237
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v9, a3
@@ -4794,7 +4794,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_603: # %cond.load253
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4811,7 +4811,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_604: # %cond.load257
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4828,7 +4828,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_605: # %cond.load261
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4845,7 +4845,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_606: # %cond.load265
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4862,7 +4862,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_607: # %cond.load269
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4879,7 +4879,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_608: # %cond.load273
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4896,7 +4896,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_609: # %cond.load277
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4913,7 +4913,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_610: # %cond.load281
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4930,7 +4930,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_611: # %cond.load285
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4947,7 +4947,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_612: # %cond.load289
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4964,7 +4964,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_613: # %cond.load293
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4981,7 +4981,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_614: # %cond.load297
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -4998,7 +4998,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_615: # %cond.load301
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5015,7 +5015,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_616: # %cond.load305
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5032,7 +5032,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_617: # %cond.load309
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5049,7 +5049,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_618: # %cond.load313
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5066,7 +5066,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_619: # %cond.load317
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5083,7 +5083,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_620: # %cond.load321
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5100,7 +5100,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_621: # %cond.load325
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5117,7 +5117,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_622: # %cond.load329
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5134,7 +5134,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_623: # %cond.load333
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5151,7 +5151,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_624: # %cond.load337
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5168,7 +5168,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_625: # %cond.load341
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5185,7 +5185,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_626: # %cond.load345
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5202,7 +5202,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_627: # %cond.load349
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5219,7 +5219,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_628: # %cond.load353
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5236,7 +5236,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_629: # %cond.load357
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5253,7 +5253,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_630: # %cond.load361
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5270,7 +5270,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_631: # %cond.load365
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a2
@@ -5303,7 +5303,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_633: # %cond.load381
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5320,7 +5320,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_634: # %cond.load385
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5337,7 +5337,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_635: # %cond.load389
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5354,7 +5354,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_636: # %cond.load393
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5371,7 +5371,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_637: # %cond.load397
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5388,7 +5388,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_638: # %cond.load401
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5405,7 +5405,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_639: # %cond.load405
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5422,7 +5422,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_640: # %cond.load409
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5439,7 +5439,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_641: # %cond.load413
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5456,7 +5456,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_642: # %cond.load417
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5473,7 +5473,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_643: # %cond.load421
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5490,7 +5490,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_644: # %cond.load425
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5507,7 +5507,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_645: # %cond.load429
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5524,7 +5524,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_646: # %cond.load433
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5541,7 +5541,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_647: # %cond.load437
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5558,7 +5558,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_648: # %cond.load441
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5575,7 +5575,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_649: # %cond.load445
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5592,7 +5592,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_650: # %cond.load449
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5609,7 +5609,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_651: # %cond.load453
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5626,7 +5626,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_652: # %cond.load457
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5643,7 +5643,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_653: # %cond.load461
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5660,7 +5660,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_654: # %cond.load465
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5677,7 +5677,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_655: # %cond.load469
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5694,7 +5694,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_656: # %cond.load473
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5711,7 +5711,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_657: # %cond.load477
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5728,7 +5728,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_658: # %cond.load481
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5745,7 +5745,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_659: # %cond.load485
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5762,7 +5762,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_660: # %cond.load489
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5779,7 +5779,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_661: # %cond.load493
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v10, a3
@@ -5812,7 +5812,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_663: # %cond.load509
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5829,7 +5829,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_664: # %cond.load513
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5846,7 +5846,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_665: # %cond.load517
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5863,7 +5863,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_666: # %cond.load521
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5880,7 +5880,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_667: # %cond.load525
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5897,7 +5897,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_668: # %cond.load529
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5914,7 +5914,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_669: # %cond.load533
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5931,7 +5931,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_670: # %cond.load537
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5948,7 +5948,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_671: # %cond.load541
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5965,7 +5965,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_672: # %cond.load545
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5982,7 +5982,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_673: # %cond.load549
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -5999,7 +5999,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_674: # %cond.load553
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6016,7 +6016,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_675: # %cond.load557
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6033,7 +6033,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_676: # %cond.load561
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6050,7 +6050,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_677: # %cond.load565
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6067,7 +6067,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_678: # %cond.load569
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6084,7 +6084,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_679: # %cond.load573
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6101,7 +6101,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_680: # %cond.load577
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6118,7 +6118,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_681: # %cond.load581
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6135,7 +6135,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_682: # %cond.load585
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6152,7 +6152,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_683: # %cond.load589
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6169,7 +6169,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_684: # %cond.load593
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6186,7 +6186,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_685: # %cond.load597
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6203,7 +6203,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_686: # %cond.load601
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6220,7 +6220,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_687: # %cond.load605
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6237,7 +6237,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_688: # %cond.load609
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6254,7 +6254,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_689: # %cond.load613
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6271,7 +6271,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_690: # %cond.load617
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6288,7 +6288,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_691: # %cond.load621
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6321,7 +6321,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_693: # %cond.load637
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6338,7 +6338,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_694: # %cond.load641
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6355,7 +6355,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_695: # %cond.load645
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6372,7 +6372,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_696: # %cond.load649
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6389,7 +6389,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_697: # %cond.load653
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6406,7 +6406,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_698: # %cond.load657
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6423,7 +6423,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_699: # %cond.load661
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6440,7 +6440,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_700: # %cond.load665
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6457,7 +6457,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_701: # %cond.load669
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6474,7 +6474,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_702: # %cond.load673
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6491,7 +6491,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_703: # %cond.load677
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6508,7 +6508,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_704: # %cond.load681
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6525,7 +6525,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_705: # %cond.load685
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6542,7 +6542,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_706: # %cond.load689
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6559,7 +6559,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_707: # %cond.load693
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6576,7 +6576,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_708: # %cond.load697
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6593,7 +6593,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_709: # %cond.load701
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6610,7 +6610,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_710: # %cond.load705
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6627,7 +6627,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_711: # %cond.load709
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6644,7 +6644,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_712: # %cond.load713
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6661,7 +6661,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_713: # %cond.load717
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6678,7 +6678,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_714: # %cond.load721
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6695,7 +6695,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_715: # %cond.load725
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6712,7 +6712,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_716: # %cond.load729
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6729,7 +6729,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_717: # %cond.load733
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6746,7 +6746,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_718: # %cond.load737
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6763,7 +6763,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_719: # %cond.load741
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6780,7 +6780,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_720: # %cond.load745
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6797,7 +6797,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_721: # %cond.load749
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -6830,7 +6830,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_723: # %cond.load765
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6847,7 +6847,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_724: # %cond.load769
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6864,7 +6864,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_725: # %cond.load773
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6881,7 +6881,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_726: # %cond.load777
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6898,7 +6898,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_727: # %cond.load781
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6915,7 +6915,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_728: # %cond.load785
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6932,7 +6932,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_729: # %cond.load789
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6949,7 +6949,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_730: # %cond.load793
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6966,7 +6966,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_731: # %cond.load797
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -6983,7 +6983,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_732: # %cond.load801
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7000,7 +7000,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_733: # %cond.load805
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7017,7 +7017,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_734: # %cond.load809
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7034,7 +7034,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_735: # %cond.load813
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7051,7 +7051,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_736: # %cond.load817
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7068,7 +7068,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_737: # %cond.load821
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7085,7 +7085,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_738: # %cond.load825
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7102,7 +7102,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_739: # %cond.load829
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7119,7 +7119,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_740: # %cond.load833
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7136,7 +7136,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_741: # %cond.load837
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7153,7 +7153,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_742: # %cond.load841
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7170,7 +7170,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_743: # %cond.load845
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7187,7 +7187,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_744: # %cond.load849
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7204,7 +7204,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_745: # %cond.load853
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7221,7 +7221,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_746: # %cond.load857
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7238,7 +7238,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_747: # %cond.load861
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7255,7 +7255,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_748: # %cond.load865
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7272,7 +7272,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_749: # %cond.load869
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7289,7 +7289,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_750: # %cond.load873
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7306,7 +7306,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_751: # %cond.load877
; CHECK-RV32-NEXT: lbu a2, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v24, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a2
@@ -7339,7 +7339,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_753: # %cond.load893
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7356,7 +7356,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_754: # %cond.load897
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7373,7 +7373,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_755: # %cond.load901
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7390,7 +7390,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_756: # %cond.load905
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7407,7 +7407,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_757: # %cond.load909
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7424,7 +7424,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_758: # %cond.load913
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7441,7 +7441,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_759: # %cond.load917
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7458,7 +7458,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_760: # %cond.load921
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7475,7 +7475,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_761: # %cond.load925
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7492,7 +7492,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_762: # %cond.load929
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7509,7 +7509,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_763: # %cond.load933
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7526,7 +7526,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_764: # %cond.load937
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7543,7 +7543,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_765: # %cond.load941
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7560,7 +7560,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_766: # %cond.load945
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7577,7 +7577,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_767: # %cond.load949
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7594,7 +7594,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_768: # %cond.load953
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7611,7 +7611,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_769: # %cond.load957
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7628,7 +7628,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_770: # %cond.load961
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7645,7 +7645,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_771: # %cond.load965
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7662,7 +7662,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_772: # %cond.load969
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7679,7 +7679,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_773: # %cond.load973
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7696,7 +7696,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_774: # %cond.load977
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7713,7 +7713,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_775: # %cond.load981
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7730,7 +7730,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_776: # %cond.load985
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7747,7 +7747,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_777: # %cond.load989
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7764,7 +7764,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_778: # %cond.load993
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7781,7 +7781,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_779: # %cond.load997
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7798,7 +7798,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_780: # %cond.load1001
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -7815,7 +7815,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_781: # %cond.load1005
; CHECK-RV32-NEXT: lbu a3, 0(a0)
; CHECK-RV32-NEXT: li a4, 512
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV32-NEXT: vmv.s.x v12, a3
@@ -11239,7 +11239,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_62: # %cond.load241
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -11521,7 +11521,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_128: # %cond.load497
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -11803,7 +11803,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_194: # %cond.load753
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -12085,7 +12085,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_260: # %cond.load1009
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -13225,7 +13225,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_3
; CHECK-RV64-NEXT: .LBB61_529: # %cond.load5
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 3, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13238,7 +13238,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_4
; CHECK-RV64-NEXT: .LBB61_530: # %cond.load9
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13251,7 +13251,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_5
; CHECK-RV64-NEXT: .LBB61_531: # %cond.load13
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 5, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13264,7 +13264,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_6
; CHECK-RV64-NEXT: .LBB61_532: # %cond.load17
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13277,7 +13277,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_7
; CHECK-RV64-NEXT: .LBB61_533: # %cond.load21
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 7, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13290,7 +13290,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_8
; CHECK-RV64-NEXT: .LBB61_534: # %cond.load25
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13303,7 +13303,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_9
; CHECK-RV64-NEXT: .LBB61_535: # %cond.load29
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 9, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13316,7 +13316,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_10
; CHECK-RV64-NEXT: .LBB61_536: # %cond.load33
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13329,7 +13329,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_11
; CHECK-RV64-NEXT: .LBB61_537: # %cond.load37
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 11, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13342,7 +13342,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_12
; CHECK-RV64-NEXT: .LBB61_538: # %cond.load41
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13355,7 +13355,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_13
; CHECK-RV64-NEXT: .LBB61_539: # %cond.load45
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 13, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13368,7 +13368,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_14
; CHECK-RV64-NEXT: .LBB61_540: # %cond.load49
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13381,7 +13381,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_15
; CHECK-RV64-NEXT: .LBB61_541: # %cond.load53
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 15, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13394,7 +13394,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_16
; CHECK-RV64-NEXT: .LBB61_542: # %cond.load57
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13407,7 +13407,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_17
; CHECK-RV64-NEXT: .LBB61_543: # %cond.load61
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 17, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13420,7 +13420,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_18
; CHECK-RV64-NEXT: .LBB61_544: # %cond.load65
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 18, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13433,7 +13433,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_19
; CHECK-RV64-NEXT: .LBB61_545: # %cond.load69
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 19, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13446,7 +13446,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_20
; CHECK-RV64-NEXT: .LBB61_546: # %cond.load73
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 20, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13459,7 +13459,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_21
; CHECK-RV64-NEXT: .LBB61_547: # %cond.load77
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 21, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13472,7 +13472,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_22
; CHECK-RV64-NEXT: .LBB61_548: # %cond.load81
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 22, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13485,7 +13485,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_23
; CHECK-RV64-NEXT: .LBB61_549: # %cond.load85
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 23, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13498,7 +13498,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_24
; CHECK-RV64-NEXT: .LBB61_550: # %cond.load89
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 24, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13511,7 +13511,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_25
; CHECK-RV64-NEXT: .LBB61_551: # %cond.load93
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 25, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13524,7 +13524,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_26
; CHECK-RV64-NEXT: .LBB61_552: # %cond.load97
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 26, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13537,7 +13537,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_27
; CHECK-RV64-NEXT: .LBB61_553: # %cond.load101
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 27, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13550,7 +13550,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_28
; CHECK-RV64-NEXT: .LBB61_554: # %cond.load105
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 28, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13563,7 +13563,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_29
; CHECK-RV64-NEXT: .LBB61_555: # %cond.load109
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 29, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13576,7 +13576,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_30
; CHECK-RV64-NEXT: .LBB61_556: # %cond.load113
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 30, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13589,7 +13589,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: j .LBB61_31
; CHECK-RV64-NEXT: .LBB61_557: # %cond.load117
; CHECK-RV64-NEXT: lbu a1, 0(a0)
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetivli zero, 31, e8, m1, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13603,7 +13603,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_558: # %cond.load121
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13619,7 +13619,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_559: # %cond.load125
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13636,7 +13636,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_560: # %cond.load129
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13653,7 +13653,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_561: # %cond.load133
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13670,7 +13670,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_562: # %cond.load137
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13687,7 +13687,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_563: # %cond.load141
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13704,7 +13704,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_564: # %cond.load145
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13721,7 +13721,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_565: # %cond.load149
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13738,7 +13738,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_566: # %cond.load153
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13755,7 +13755,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_567: # %cond.load157
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13772,7 +13772,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_568: # %cond.load161
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13789,7 +13789,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_569: # %cond.load165
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13806,7 +13806,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_570: # %cond.load169
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13823,7 +13823,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_571: # %cond.load173
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13840,7 +13840,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_572: # %cond.load177
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13857,7 +13857,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_573: # %cond.load181
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13874,7 +13874,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_574: # %cond.load185
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13891,7 +13891,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_575: # %cond.load189
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13908,7 +13908,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_576: # %cond.load193
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13925,7 +13925,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_577: # %cond.load197
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13942,7 +13942,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_578: # %cond.load201
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13959,7 +13959,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_579: # %cond.load205
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13976,7 +13976,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_580: # %cond.load209
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -13993,7 +13993,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_581: # %cond.load213
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14010,7 +14010,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_582: # %cond.load217
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14027,7 +14027,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_583: # %cond.load221
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14044,7 +14044,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_584: # %cond.load225
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14061,7 +14061,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_585: # %cond.load229
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14078,7 +14078,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_586: # %cond.load233
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14095,7 +14095,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_587: # %cond.load237
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v9, a1
@@ -14128,7 +14128,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_589: # %cond.load253
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14145,7 +14145,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_590: # %cond.load257
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14162,7 +14162,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_591: # %cond.load261
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14179,7 +14179,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_592: # %cond.load265
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14196,7 +14196,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_593: # %cond.load269
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14213,7 +14213,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_594: # %cond.load273
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14230,7 +14230,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_595: # %cond.load277
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14247,7 +14247,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_596: # %cond.load281
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14264,7 +14264,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_597: # %cond.load285
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14281,7 +14281,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_598: # %cond.load289
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14298,7 +14298,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_599: # %cond.load293
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14315,7 +14315,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_600: # %cond.load297
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14332,7 +14332,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_601: # %cond.load301
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14349,7 +14349,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_602: # %cond.load305
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14366,7 +14366,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_603: # %cond.load309
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14383,7 +14383,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_604: # %cond.load313
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14400,7 +14400,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_605: # %cond.load317
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14417,7 +14417,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_606: # %cond.load321
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14434,7 +14434,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_607: # %cond.load325
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14451,7 +14451,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_608: # %cond.load329
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14468,7 +14468,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_609: # %cond.load333
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14485,7 +14485,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_610: # %cond.load337
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14502,7 +14502,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_611: # %cond.load341
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14519,7 +14519,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_612: # %cond.load345
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14536,7 +14536,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_613: # %cond.load349
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14553,7 +14553,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_614: # %cond.load353
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14570,7 +14570,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_615: # %cond.load357
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14587,7 +14587,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_616: # %cond.load361
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14604,7 +14604,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_617: # %cond.load365
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14621,7 +14621,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_618: # %cond.load369
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14638,7 +14638,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_619: # %cond.load373
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14655,7 +14655,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_620: # %cond.load377
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14672,7 +14672,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_621: # %cond.load381
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14689,7 +14689,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_622: # %cond.load385
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14706,7 +14706,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_623: # %cond.load389
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14723,7 +14723,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_624: # %cond.load393
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14740,7 +14740,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_625: # %cond.load397
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14757,7 +14757,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_626: # %cond.load401
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14774,7 +14774,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_627: # %cond.load405
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14791,7 +14791,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_628: # %cond.load409
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14808,7 +14808,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_629: # %cond.load413
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14825,7 +14825,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_630: # %cond.load417
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14842,7 +14842,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_631: # %cond.load421
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14859,7 +14859,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_632: # %cond.load425
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14876,7 +14876,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_633: # %cond.load429
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14893,7 +14893,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_634: # %cond.load433
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14910,7 +14910,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_635: # %cond.load437
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14927,7 +14927,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_636: # %cond.load441
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14944,7 +14944,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_637: # %cond.load445
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14961,7 +14961,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_638: # %cond.load449
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14978,7 +14978,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_639: # %cond.load453
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -14995,7 +14995,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_640: # %cond.load457
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15012,7 +15012,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_641: # %cond.load461
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15029,7 +15029,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_642: # %cond.load465
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15046,7 +15046,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_643: # %cond.load469
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15063,7 +15063,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_644: # %cond.load473
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15080,7 +15080,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_645: # %cond.load477
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15097,7 +15097,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_646: # %cond.load481
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15114,7 +15114,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_647: # %cond.load485
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15131,7 +15131,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_648: # %cond.load489
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15148,7 +15148,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_649: # %cond.load493
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v10, a2
@@ -15181,7 +15181,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_651: # %cond.load509
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15198,7 +15198,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_652: # %cond.load513
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15215,7 +15215,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_653: # %cond.load517
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15232,7 +15232,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_654: # %cond.load521
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15249,7 +15249,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_655: # %cond.load525
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15266,7 +15266,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_656: # %cond.load529
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15283,7 +15283,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_657: # %cond.load533
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15300,7 +15300,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_658: # %cond.load537
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15317,7 +15317,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_659: # %cond.load541
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15334,7 +15334,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_660: # %cond.load545
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15351,7 +15351,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_661: # %cond.load549
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15368,7 +15368,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_662: # %cond.load553
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15385,7 +15385,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_663: # %cond.load557
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15402,7 +15402,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_664: # %cond.load561
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15419,7 +15419,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_665: # %cond.load565
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15436,7 +15436,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_666: # %cond.load569
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15453,7 +15453,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_667: # %cond.load573
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15470,7 +15470,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_668: # %cond.load577
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15487,7 +15487,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_669: # %cond.load581
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15504,7 +15504,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_670: # %cond.load585
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15521,7 +15521,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_671: # %cond.load589
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15538,7 +15538,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_672: # %cond.load593
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15555,7 +15555,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_673: # %cond.load597
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15572,7 +15572,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_674: # %cond.load601
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15589,7 +15589,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_675: # %cond.load605
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15606,7 +15606,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_676: # %cond.load609
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15623,7 +15623,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_677: # %cond.load613
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15640,7 +15640,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_678: # %cond.load617
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15657,7 +15657,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_679: # %cond.load621
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15674,7 +15674,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_680: # %cond.load625
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15691,7 +15691,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_681: # %cond.load629
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15708,7 +15708,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_682: # %cond.load633
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15725,7 +15725,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_683: # %cond.load637
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15742,7 +15742,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_684: # %cond.load641
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15759,7 +15759,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_685: # %cond.load645
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15776,7 +15776,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_686: # %cond.load649
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15793,7 +15793,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_687: # %cond.load653
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15810,7 +15810,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_688: # %cond.load657
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15827,7 +15827,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_689: # %cond.load661
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15844,7 +15844,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_690: # %cond.load665
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15861,7 +15861,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_691: # %cond.load669
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15878,7 +15878,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_692: # %cond.load673
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15895,7 +15895,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_693: # %cond.load677
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15912,7 +15912,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_694: # %cond.load681
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15929,7 +15929,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_695: # %cond.load685
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15946,7 +15946,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_696: # %cond.load689
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15963,7 +15963,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_697: # %cond.load693
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15980,7 +15980,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_698: # %cond.load697
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -15997,7 +15997,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_699: # %cond.load701
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16014,7 +16014,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_700: # %cond.load705
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16031,7 +16031,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_701: # %cond.load709
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16048,7 +16048,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_702: # %cond.load713
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16065,7 +16065,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_703: # %cond.load717
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16082,7 +16082,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_704: # %cond.load721
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16099,7 +16099,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_705: # %cond.load725
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16116,7 +16116,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_706: # %cond.load729
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16133,7 +16133,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_707: # %cond.load733
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16150,7 +16150,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_708: # %cond.load737
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16167,7 +16167,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_709: # %cond.load741
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16184,7 +16184,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_710: # %cond.load745
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16201,7 +16201,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_711: # %cond.load749
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a1
@@ -16234,7 +16234,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_713: # %cond.load765
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16251,7 +16251,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_714: # %cond.load769
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16268,7 +16268,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_715: # %cond.load773
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16285,7 +16285,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_716: # %cond.load777
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16302,7 +16302,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_717: # %cond.load781
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16319,7 +16319,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_718: # %cond.load785
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16336,7 +16336,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_719: # %cond.load789
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16353,7 +16353,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_720: # %cond.load793
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16370,7 +16370,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_721: # %cond.load797
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16387,7 +16387,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_722: # %cond.load801
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16404,7 +16404,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_723: # %cond.load805
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16421,7 +16421,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_724: # %cond.load809
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16438,7 +16438,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_725: # %cond.load813
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16455,7 +16455,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_726: # %cond.load817
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16472,7 +16472,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_727: # %cond.load821
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16489,7 +16489,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_728: # %cond.load825
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16506,7 +16506,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_729: # %cond.load829
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16523,7 +16523,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_730: # %cond.load833
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16540,7 +16540,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_731: # %cond.load837
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16557,7 +16557,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_732: # %cond.load841
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16574,7 +16574,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_733: # %cond.load845
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16591,7 +16591,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_734: # %cond.load849
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16608,7 +16608,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_735: # %cond.load853
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16625,7 +16625,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_736: # %cond.load857
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16642,7 +16642,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_737: # %cond.load861
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16659,7 +16659,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_738: # %cond.load865
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16676,7 +16676,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_739: # %cond.load869
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16693,7 +16693,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_740: # %cond.load873
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16710,7 +16710,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_741: # %cond.load877
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16727,7 +16727,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_742: # %cond.load881
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16744,7 +16744,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_743: # %cond.load885
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16761,7 +16761,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_744: # %cond.load889
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16778,7 +16778,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_745: # %cond.load893
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16795,7 +16795,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_746: # %cond.load897
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16812,7 +16812,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_747: # %cond.load901
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16829,7 +16829,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_748: # %cond.load905
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16846,7 +16846,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_749: # %cond.load909
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16863,7 +16863,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_750: # %cond.load913
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16880,7 +16880,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_751: # %cond.load917
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16897,7 +16897,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_752: # %cond.load921
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16914,7 +16914,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_753: # %cond.load925
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16931,7 +16931,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_754: # %cond.load929
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16948,7 +16948,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_755: # %cond.load933
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16965,7 +16965,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_756: # %cond.load937
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16982,7 +16982,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_757: # %cond.load941
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -16999,7 +16999,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_758: # %cond.load945
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17016,7 +17016,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_759: # %cond.load949
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17033,7 +17033,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_760: # %cond.load953
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17050,7 +17050,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_761: # %cond.load957
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17067,7 +17067,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_762: # %cond.load961
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17084,7 +17084,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_763: # %cond.load965
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17101,7 +17101,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_764: # %cond.load969
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17118,7 +17118,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_765: # %cond.load973
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17135,7 +17135,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_766: # %cond.load977
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17152,7 +17152,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_767: # %cond.load981
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17169,7 +17169,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_768: # %cond.load985
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17186,7 +17186,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_769: # %cond.load989
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17203,7 +17203,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_770: # %cond.load993
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17220,7 +17220,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_771: # %cond.load997
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17237,7 +17237,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_772: # %cond.load1001
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
@@ -17254,7 +17254,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_773: # %cond.load1005
; CHECK-RV64-NEXT: lbu a2, 0(a0)
; CHECK-RV64-NEXT: li a3, 512
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv8r.v v16, v8
; CHECK-RV64-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64-NEXT: vmv.s.x v12, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index a983e8b99ebe7b..d0f1a3705a4018 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -13,7 +13,7 @@ define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
@@ -31,7 +31,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
@@ -41,7 +41,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
@@ -51,7 +51,7 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
@@ -69,7 +69,7 @@ define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec)
define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -87,7 +87,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
@@ -97,7 +97,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -107,7 +107,7 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec)
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
@@ -125,7 +125,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
@@ -135,7 +135,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
@@ -145,7 +145,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
@@ -155,7 +155,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
@@ -165,7 +165,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_10:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
@@ -175,7 +175,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v14
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
@@ -185,7 +185,7 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec)
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_14:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v15
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
@@ -239,7 +239,7 @@ define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_15(<vscale x 16 x i32> %vec)
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
@@ -303,7 +303,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_8(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
@@ -374,7 +374,7 @@ define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_2(<vscale x 16 x half> %vec
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_4(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
@@ -522,7 +522,7 @@ define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv16bf16_2(<vscale x 16 x bfloat
define <vscale x 2 x bfloat> @extract_nxv2bf16_nxv16bf16_4(<vscale x 16 x bfloat> %vec) {
; CHECK-LABEL: extract_nxv2bf16_nxv16bf16_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%c = call <vscale x 2 x bfloat> @llvm.vector.extract.nxv2bf16.nxv16bf16(<vscale x 16 x bfloat> %vec, i64 4)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index 114efd12d6f24c..029b93f94ada3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -16,7 +16,7 @@ define <512 x i8> @single_source(<512 x i8> %a) {
; CHECK-NEXT: addi s0, sp, 1536
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: andi sp, sp, -512
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: addi a1, sp, 512
@@ -105,7 +105,7 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: addi s0, sp, 1536
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: andi sp, sp, -512
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a0, 512
; CHECK-NEXT: addi a1, sp, 512
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 59f060d1bd7da7..e7a5e0e4acb76a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -1659,7 +1659,7 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v8
; RV32-NEXT: lui a2, 1044480
; RV32-NEXT: lui a3, 61681
@@ -2056,7 +2056,7 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v8
; RV32-NEXT: lui a2, 1044480
; RV32-NEXT: lui a3, 61681
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index 6ee630f5b9a8c4..f3574e63fb1815 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -180,7 +180,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 2
; CHECK-NEXT: vmv8r.v v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index b2bd974da34f40..e08edef85854d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -180,7 +180,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 2
; CHECK-NEXT: vmv8r.v v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 515f94fb3d42d4..02a013c1d83100 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_ceil_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
;
; ZVFHMIN-LABEL: vp_ceil_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.ceil.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.ceil.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index 6edbc6afe0410a..541171441a753e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1796,7 +1796,7 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv8r.v v24, v16
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: lui a2, 209715
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index b3cf6fbd1bd767..420c30fa793429 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_floor_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_floor_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.floor.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.floor.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index ed71a9f0d7016a..fe5823660e653b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -13,7 +13,7 @@ declare <2 x half> @llvm.vp.maximum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -27,7 +27,7 @@ define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -85,7 +85,7 @@ declare <4 x half> @llvm.vp.maximum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -99,7 +99,7 @@ define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -157,7 +157,7 @@ declare <8 x half> @llvm.vp.maximum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -171,7 +171,7 @@ define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmax_vv_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -231,7 +231,7 @@ declare <16 x half> @llvm.vp.maximum.v16f16(<16 x half>, <16 x half>, <16 x i1>,
define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -247,7 +247,7 @@ define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
;
; ZVFHMIN-LABEL: vfmax_vv_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -307,7 +307,7 @@ declare <2 x float> @llvm.vp.maximum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i
define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -342,7 +342,7 @@ declare <4 x float> @llvm.vp.maximum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i
define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -377,7 +377,7 @@ declare <8 x float> @llvm.vp.maximum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i
define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -414,7 +414,7 @@ declare <16 x float> @llvm.vp.maximum.v16f32(<16 x float>, <16 x float>, <16 x i
define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -451,7 +451,7 @@ declare <2 x double> @llvm.vp.maximum.v2f64(<2 x double>, <2 x double>, <2 x i1>
define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -486,7 +486,7 @@ declare <4 x double> @llvm.vp.maximum.v4f64(<4 x double>, <4 x double>, <4 x i1>
define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -523,7 +523,7 @@ declare <8 x double> @llvm.vp.maximum.v8f64(<8 x double>, <8 x double>, <8 x i1>
define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -566,7 +566,7 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -618,7 +618,7 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index fdd02f84d413b2..606dded8aac90c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -13,7 +13,7 @@ declare <2 x half> @llvm.vp.minimum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -27,7 +27,7 @@ define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -85,7 +85,7 @@ declare <4 x half> @llvm.vp.minimum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -99,7 +99,7 @@ define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -157,7 +157,7 @@ declare <8 x half> @llvm.vp.minimum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -171,7 +171,7 @@ define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -231,7 +231,7 @@ declare <16 x half> @llvm.vp.minimum.v16f16(<16 x half>, <16 x half>, <16 x i1>,
define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -247,7 +247,7 @@ define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -307,7 +307,7 @@ declare <2 x float> @llvm.vp.minimum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i
define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -342,7 +342,7 @@ declare <4 x float> @llvm.vp.minimum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i
define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -377,7 +377,7 @@ declare <8 x float> @llvm.vp.minimum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i
define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -414,7 +414,7 @@ declare <16 x float> @llvm.vp.minimum.v16f32(<16 x float>, <16 x float>, <16 x i
define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -451,7 +451,7 @@ declare <2 x double> @llvm.vp.minimum.v2f64(<2 x double>, <2 x double>, <2 x i1>
define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -486,7 +486,7 @@ declare <4 x double> @llvm.vp.minimum.v4f64(<4 x double>, <4 x double>, <4 x i1>
define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -523,7 +523,7 @@ declare <8 x double> @llvm.vp.minimum.v8f64(<8 x double>, <8 x double>, <8 x i1>
define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -566,7 +566,7 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -618,7 +618,7 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index c1acf789a7b428..2d6accb0b89fac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -38,7 +38,7 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; V128-LABEL: interleave_v2f64:
; V128: # %bb.0:
-; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; V128-NEXT: vmv1r.v v12, v9
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vid.v v9
@@ -243,7 +243,7 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: sub sp, sp, a0
; V128-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; V128-NEXT: vmv8r.v v24, v16
; V128-NEXT: vmv8r.v v16, v8
; V128-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
index 1b37e4af1bba03..3e77a7d2412f2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
@@ -97,7 +97,7 @@ declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i3
define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_v32f32_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
index 1c4cf15c07f272..8c5a0100b733a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll
@@ -712,7 +712,7 @@ define <16 x i64> @fshl_v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 872f7ebf245f8e..2a6ae31c709394 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -133,7 +133,7 @@ define <vscale x 2 x i32> @insert_nxv8i32_v4i32_0(<vscale x 2 x i32> %vec, <4 x
;
; VLS-LABEL: insert_nxv8i32_v4i32_0:
; VLS: # %bb.0:
-; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLS-NEXT: vmv1r.v v8, v9
; VLS-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> %vec, <4 x i32> %subvec, i64 0)
@@ -144,7 +144,7 @@ define <vscale x 2 x i32> @insert_nxv8i32_v4i32_0(<vscale x 2 x i32> %vec, <4 x
define <4 x i32> @insert_v4i32_v4i32_0(<4 x i32> %vec, <4 x i32> %subvec) {
; CHECK-LABEL: insert_v4i32_v4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vector.insert.v4i32.v4i32(<4 x i32> %vec, <4 x i32> %subvec, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index 215097b6f220cf..4ab49ed2647e22 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -51,7 +51,7 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) {
define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; V128-LABEL: interleave_v2i64:
; V128: # %bb.0:
-; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; V128-NEXT: vmv1r.v v12, v9
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vid.v v9
@@ -412,7 +412,7 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: sub sp, sp, a0
; V128-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; V128-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; V128-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; V128-NEXT: vmv8r.v v24, v16
; V128-NEXT: vmv8r.v v16, v8
; V128-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 1d85293003b333..fbe51b5083cecd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -556,13 +556,13 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
define <4 x i8> @mgather_falsemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i8:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -779,7 +779,7 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB12_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB12_14: # %cond.load4
@@ -1252,13 +1252,13 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
define <4 x i16> @mgather_falsemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i16:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -1486,7 +1486,7 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB23_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB23_14: # %cond.load4
@@ -1635,7 +1635,7 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB24_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB24_14: # %cond.load4
@@ -1788,7 +1788,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB25_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load4
@@ -1935,7 +1935,7 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB26_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB26_14: # %cond.load4
@@ -2300,13 +2300,13 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
define <4 x i32> @mgather_falsemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_v4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i32:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -2533,7 +2533,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB35_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB35_14: # %cond.load4
@@ -2681,7 +2681,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB36_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB36_14: # %cond.load4
@@ -2836,7 +2836,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB37_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load4
@@ -2989,7 +2989,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB38_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB38_14: # %cond.load4
@@ -3138,7 +3138,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB39_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB39_14: # %cond.load4
@@ -3294,7 +3294,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB40_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB40_14: # %cond.load4
@@ -3440,7 +3440,7 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB41_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB41_14: # %cond.load4
@@ -3792,13 +3792,13 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
define <4 x i64> @mgather_falsemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
; RV32V-LABEL: mgather_falsemask_v4i64:
; RV32V: # %bb.0:
-; RV32V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32V-NEXT: vmv2r.v v8, v10
; RV32V-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4i64:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv2r.v v8, v10
; RV64V-NEXT: ret
;
@@ -7085,13 +7085,13 @@ define <4 x bfloat> @mgather_truemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %pass
define <4 x bfloat> @mgather_falsemask_v4bf16(<4 x ptr> %ptrs, <4 x bfloat> %passthru) {
; RV32-LABEL: mgather_falsemask_v4bf16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4bf16:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -7319,7 +7319,7 @@ define <8 x bfloat> @mgather_baseidx_v8i8_v8bf16(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB64_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB64_14: # %cond.load4
@@ -7468,7 +7468,7 @@ define <8 x bfloat> @mgather_baseidx_sext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB65_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB65_14: # %cond.load4
@@ -7621,7 +7621,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB66_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load4
@@ -7768,7 +7768,7 @@ define <8 x bfloat> @mgather_baseidx_v8bf16(ptr %base, <8 x i16> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-NEXT: .LBB67_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB67_14: # %cond.load4
@@ -8097,13 +8097,13 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
define <4 x half> @mgather_falsemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_v4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f16:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -8424,7 +8424,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB74_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB74_14: # %cond.load4
@@ -8548,7 +8548,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB74_14: # %cond.load4
@@ -8697,7 +8697,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB75_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB75_14: # %cond.load4
@@ -8821,7 +8821,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB75_14: # %cond.load4
@@ -8974,7 +8974,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB76_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB76_14: # %cond.load4
@@ -9106,7 +9106,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_14: # %cond.load4
@@ -9253,7 +9253,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFH-NEXT: .LBB77_13: # %else20
-; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFH-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB77_14: # %cond.load4
@@ -9369,7 +9369,7 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslideup.vi v9, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_13: # %else20
-; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-ZVFHMIN-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB77_14: # %cond.load4
@@ -9606,13 +9606,13 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
define <4 x float> @mgather_falsemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f32:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv1r.v v8, v10
; RV64V-NEXT: ret
;
@@ -9839,7 +9839,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB84_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB84_14: # %cond.load4
@@ -9987,7 +9987,7 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB85_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB85_14: # %cond.load4
@@ -10142,7 +10142,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB86_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB86_14: # %cond.load4
@@ -10295,7 +10295,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB87_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB87_14: # %cond.load4
@@ -10444,7 +10444,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB88_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB88_14: # %cond.load4
@@ -10600,7 +10600,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB89_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB89_14: # %cond.load4
@@ -10746,7 +10746,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7
; RV64ZVE32F-NEXT: .LBB90_13: # %else20
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB90_14: # %cond.load4
@@ -11056,13 +11056,13 @@ define <4 x double> @mgather_truemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passt
define <4 x double> @mgather_falsemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passthru) {
; RV32V-LABEL: mgather_falsemask_v4f64:
; RV32V: # %bb.0:
-; RV32V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32V-NEXT: vmv2r.v v8, v10
; RV32V-NEXT: ret
;
; RV64V-LABEL: mgather_falsemask_v4f64:
; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64V-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64V-NEXT: vmv2r.v v8, v10
; RV64V-NEXT: ret
;
@@ -13623,7 +13623,7 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15
; RV64ZVE32F-NEXT: .LBB107_24: # %else44
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv1r.v v8, v9
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB107_25: # %cond.load4
@@ -14010,7 +14010,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 31
; RV64ZVE32F-NEXT: .LBB108_48: # %else92
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64ZVE32F-NEXT: vmv2r.v v8, v10
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB108_49: # %cond.load4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
index c4f78288ef110b..9c2848847f069c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -318,7 +318,7 @@ define <128 x i16> @masked_load_v128i16(ptr %a, <128 x i1> %mask) {
define <256 x i8> @masked_load_v256i8(ptr %a, <256 x i1> %mask) {
; CHECK-LABEL: masked_load_v256i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index b32c8a9dbcf368..b00a5c90bf4ce3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -135,7 +135,7 @@ declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -264,7 +264,7 @@ declare <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -309,7 +309,7 @@ declare <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -396,7 +396,7 @@ declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -441,7 +441,7 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -486,7 +486,7 @@ declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -531,7 +531,7 @@ declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -576,7 +576,7 @@ declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
index 1d1a97e7a72c30..52eccd9dc345dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -23,7 +23,7 @@ declare i1 @llvm.vp.reduce.or.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -40,7 +40,7 @@ declare i1 @llvm.vp.reduce.xor.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -73,7 +73,7 @@ declare i1 @llvm.vp.reduce.or.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -90,7 +90,7 @@ declare i1 @llvm.vp.reduce.xor.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -123,7 +123,7 @@ declare i1 @llvm.vp.reduce.or.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -140,7 +140,7 @@ declare i1 @llvm.vp.reduce.xor.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -173,7 +173,7 @@ declare i1 @llvm.vp.reduce.or.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -190,7 +190,7 @@ declare i1 @llvm.vp.reduce.xor.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -239,7 +239,7 @@ declare i1 @llvm.vp.reduce.and.v256i1(i1, <256 x i1>, <256 x i1>, i32)
define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v256i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v9
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: li a3, 128
@@ -248,7 +248,7 @@ define zeroext i1 @vpreduce_and_v256i1(i1 zeroext %s, <256 x i1> %v, <256 x i1>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB14_2:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmnot.m v9, v9
@@ -275,7 +275,7 @@ declare i1 @llvm.vp.reduce.or.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -292,7 +292,7 @@ declare i1 @llvm.vp.reduce.xor.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -309,7 +309,7 @@ declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -326,7 +326,7 @@ declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -343,7 +343,7 @@ declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -360,7 +360,7 @@ declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -377,7 +377,7 @@ declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -506,7 +506,7 @@ declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -523,7 +523,7 @@ declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -540,7 +540,7 @@ declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -557,7 +557,7 @@ declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -574,7 +574,7 @@ declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -591,7 +591,7 @@ declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -608,7 +608,7 @@ declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -625,7 +625,7 @@ declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -642,7 +642,7 @@ declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -659,7 +659,7 @@ declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -676,7 +676,7 @@ declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -693,7 +693,7 @@ declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -710,7 +710,7 @@ declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -727,7 +727,7 @@ declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index b6157da37e4311..b06d60daa0ae38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -123,7 +123,7 @@ declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -240,7 +240,7 @@ declare <8 x float> @llvm.vp.rint.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -281,7 +281,7 @@ declare <16 x float> @llvm.vp.rint.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -360,7 +360,7 @@ declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -401,7 +401,7 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -442,7 +442,7 @@ declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -483,7 +483,7 @@ declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -524,7 +524,7 @@ declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 4afb96f211a880..0b5203b7230b11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
;
; ZVFHMIN-LABEL: vp_round_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_round_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.round.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.round.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index ef7cb78ff03a71..af92eb3282f536 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
;
; ZVFHMIN-LABEL: vp_roundeven_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
;
; ZVFHMIN-LABEL: vp_roundeven_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.roundeven.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.roundeven.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 29bffdf43289df..c47c9ab6d29abf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -194,7 +194,7 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
;
; ZVFHMIN-LABEL: vp_roundtozero_v8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -262,7 +262,7 @@ declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
@@ -282,7 +282,7 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
;
; ZVFHMIN-LABEL: vp_roundtozero_v16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -434,7 +434,7 @@ declare <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -479,7 +479,7 @@ declare <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float>, <16 x i1>, i32)
define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -566,7 +566,7 @@ declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
@@ -611,7 +611,7 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
@@ -656,7 +656,7 @@ declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
@@ -701,7 +701,7 @@ declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
@@ -752,7 +752,7 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index db28e6a8ad2e7f..b80cdba9426013 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -598,7 +598,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -649,7 +649,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_v256i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -679,7 +679,7 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z
define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_v256i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 96012c21bc5671..8a5943e6a30c78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -8,7 +8,7 @@
define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
; VLA-LABEL: concat_2xv4i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLA-NEXT: vmv1r.v v10, v9
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT: vslideup.vi v8, v10, 4
@@ -33,7 +33,7 @@ define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
;
; VLS-LABEL: concat_4xv2i32:
; VLS: # %bb.0:
-; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLS-NEXT: vmv1r.v v13, v10
; VLS-NEXT: vmv1r.v v12, v8
; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -64,7 +64,7 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x
;
; VLS-LABEL: concat_8xv1i32:
; VLS: # %bb.0:
-; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLS-NEXT: vmv1r.v v17, v12
; VLS-NEXT: vmv1r.v v16, v8
; VLS-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
@@ -92,7 +92,7 @@ define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x
define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
; VLA-LABEL: concat_2xv8i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLA-NEXT: vmv2r.v v12, v10
; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; VLA-NEXT: vslideup.vi v8, v12, 8
@@ -108,7 +108,7 @@ define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
define <16 x i32> @concat_4xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; VLA-LABEL: concat_4xv4i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLA-NEXT: vmv1r.v v14, v11
; VLA-NEXT: vmv1r.v v12, v10
; VLA-NEXT: vmv1r.v v10, v9
@@ -145,7 +145,7 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
;
; VLS-LABEL: concat_8xv2i32:
; VLS: # %bb.0:
-; VLS-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLS-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLS-NEXT: vmv1r.v v19, v14
; VLS-NEXT: vmv1r.v v18, v12
; VLS-NEXT: vmv1r.v v17, v10
@@ -170,7 +170,7 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
; VLA-LABEL: concat_2xv16i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLA-NEXT: vmv4r.v v16, v12
; VLA-NEXT: li a0, 32
; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -187,7 +187,7 @@ define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; VLA-LABEL: concat_4xv8i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLA-NEXT: vmv2r.v v20, v14
; VLA-NEXT: vmv2r.v v16, v12
; VLA-NEXT: vmv2r.v v12, v10
@@ -211,7 +211,7 @@ define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x
define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
; VLA-LABEL: concat_8xv4i32:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; VLA-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; VLA-NEXT: vmv1r.v v18, v15
; VLA-NEXT: vmv1r.v v20, v14
; VLA-NEXT: vmv1r.v v14, v13
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index 99d4b8880cc204..36a92b3fbee2ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -108,7 +108,7 @@ define <4 x i64> @m2_splat_into_identity(<4 x i64> %v1) vscale_range(2,2) {
define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m2_broadcast_i128:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: ret
%res = shufflevector <4 x i64> %v1, <4 x i64> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -118,7 +118,7 @@ define <4 x i64> @m2_broadcast_i128(<4 x i64> %v1) vscale_range(2,2) {
define <8 x i64> @m4_broadcast_i128(<8 x i64> %v1) vscale_range(2,2) {
; CHECK-LABEL: m4_broadcast_i128:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index ec235f75c0cc3c..e7e38de4e22b2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -966,7 +966,7 @@ define <16 x i8> @reverse_v16i8_2(<8 x i8> %a, <8 x i8> %b) {
define <32 x i8> @reverse_v32i8_2(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: reverse_v32i8_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
@@ -1036,7 +1036,7 @@ define <8 x i16> @reverse_v8i16_2(<4 x i16> %a, <4 x i16> %b) {
define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: reverse_v16i16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1062,7 +1062,7 @@ define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
define <32 x i16> @reverse_v32i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: reverse_v32i16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1119,7 +1119,7 @@ define <4 x i32> @reverse_v4i32_2(<2 x i32> %a, < 2 x i32> %b) {
define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: reverse_v8i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1146,7 +1146,7 @@ define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: reverse_v16i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1175,7 +1175,7 @@ define <16 x i32> @reverse_v16i32_2(<8 x i32> %a, <8 x i32> %b) {
define <32 x i32> @reverse_v32i32_2(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: reverse_v32i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1225,7 +1225,7 @@ define <4 x i64> @reverse_v4i64_2(<2 x i64> %a, < 2 x i64> %b) {
define <8 x i64> @reverse_v8i64_2(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: reverse_v8i64_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
@@ -1296,7 +1296,7 @@ define <8 x half> @reverse_v8f16_2(<4 x half> %a, <4 x half> %b) {
define <16 x half> @reverse_v16f16_2(<8 x half> %a, <8 x half> %b) {
; CHECK-LABEL: reverse_v16f16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1369,7 +1369,7 @@ define <4 x float> @reverse_v4f32_2(<2 x float> %a, <2 x float> %b) {
define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: reverse_v8f32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1396,7 +1396,7 @@ define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
define <16 x float> @reverse_v16f32_2(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: reverse_v16f32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1440,7 +1440,7 @@ define <4 x double> @reverse_v4f64_2(<2 x double> %a, < 2 x double> %b) {
define <8 x double> @reverse_v8f64_2(<4 x double> %a, <4 x double> %b) {
; CHECK-LABEL: reverse_v8f64_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
index f0360c2bdd20b0..051a34391ac07b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
@@ -415,7 +415,7 @@ define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert3(<4 x i8> %v, i8 %b) {
define <2 x i8> @vslide1up_4xi8_neg_length_changing(<4 x i8> %v, i8 %b) {
; CHECK-LABEL: vslide1up_4xi8_neg_length_changing:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index d98396538e8ee0..78fb9a349d1496 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -62,7 +62,7 @@ define void @gather_masked(ptr noalias nocapture %A, ptr noalias nocapture reado
; CHECK-NEXT: li a4, 5
; CHECK-NEXT: .LBB1_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vlse8.v v9, (a1), a4, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index d65957b7a86ae3..92b37028fb11d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -542,7 +542,7 @@ declare <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr, i32, <
define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind {
; CHECK-LABEL: strided_vpload_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: mv a3, a2
@@ -599,7 +599,7 @@ declare <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr, i32,
define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_v33f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: li a5, 32
; CHECK-RV32-NEXT: mv a3, a4
@@ -650,7 +650,7 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
;
; CHECK-RV64-LABEL: strided_load_v33f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: li a5, 32
; CHECK-RV64-NEXT: mv a4, a3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 61a4c4f6bc3d80..75479038a29354 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -53,7 +53,7 @@ declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32)
define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
@@ -232,7 +232,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: mul a2, a2, a3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 72 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a3, 24
@@ -543,7 +543,7 @@ declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index a6e2b04d3b2712..f7179906f02a71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -105,7 +105,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV32-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV32-SLOW-NEXT: .LBB4_4: # %else2
-; RV32-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-SLOW-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-SLOW-NEXT: vmv1r.v v8, v9
; RV32-SLOW-NEXT: ret
;
@@ -139,7 +139,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i16> %
; RV64-SLOW-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV64-SLOW-NEXT: .LBB4_4: # %else2
-; RV64-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-SLOW-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
;
@@ -191,7 +191,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV32-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV32-SLOW-NEXT: .LBB5_4: # %else2
-; RV32-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-SLOW-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-SLOW-NEXT: vmv1r.v v8, v9
; RV32-SLOW-NEXT: ret
;
@@ -223,7 +223,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: vmv.s.x v8, a0
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV64-SLOW-NEXT: .LBB5_4: # %else2
-; RV64-SLOW-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-SLOW-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 1dba245b6769c0..36ce83bbdd660b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -363,7 +363,7 @@ declare <256 x i8> @llvm.vp.add.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index ee650d46cedb07..ab22a3f4a7ba02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -267,7 +267,7 @@ declare <256 x i8> @llvm.vp.smax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index 34d7ce30d245c2..0beab68f77aaac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -266,7 +266,7 @@ declare <256 x i8> @llvm.vp.umax.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index 88258d93592abe..5cbb4a291b51df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -267,7 +267,7 @@ declare <256 x i8> @llvm.vp.smin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index 31d9f83a844e71..e227fbd60252fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -266,7 +266,7 @@ declare <256 x i8> @llvm.vp.umin.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 33093e53062e7e..dab302cdfc680b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2617,7 +2617,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_v32f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v16, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index c64c77d2bdf034..d2969a0a975b90 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -394,7 +394,7 @@ declare <33 x double> @llvm.vp.load.v33f64.p0(ptr, <33 x i1>, i32)
define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v33f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: mv a3, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index 2f659dfbe8f73b..ad7a14f0b80cf4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1181,7 +1181,7 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 3edba04af25595..0f9f4b5221f291 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -372,7 +372,7 @@ declare <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index e7a553fc61689b..4f4f290c7f72a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -368,7 +368,7 @@ declare <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index 59ef583a382d14..82d11838670012 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -163,7 +163,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v6, v8
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: li a2, 128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 7e703fafa9ab4f..c3aa3ba6020b19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -384,7 +384,7 @@ declare <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: addi a3, a1, -128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 125b6ba22ebb86..10ba1637f33ac7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -379,7 +379,7 @@ declare <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>,
define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_v258i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: addi a3, a1, -128
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index ae033fe94716fa..81dbd71b48112f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.floor.nxv4bf16(<vscale x 4 x bfloat>, <vs
define <vscale x 4 x bfloat> @vp_floor_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.floor.nxv8bf16(<vscale x 8 x bfloat>, <vs
define <vscale x 8 x bfloat> @vp_floor_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.floor.nxv16bf16(<vscale x 16 x bfloat>,
define <vscale x 16 x bfloat> @vp_floor_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_floor_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_floor_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_floor_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.floor.nxv4f32(<vscale x 4 x float>, <vscal
define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.floor.nxv8f32(<vscale x 8 x float>, <vscal
define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.floor.nxv16f32(<vscale x 16 x float>, <vs
define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index 9269339fa001fb..237900bee21f7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -153,7 +153,7 @@ define <vscale x 32 x bfloat> @vfmax_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFH-NEXT: slli a0, a0, 1
; ZVFH-NEXT: add a0, a0, a1
; ZVFH-NEXT: sub sp, sp, a0
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v24, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -229,7 +229,7 @@ define <vscale x 32 x bfloat> @vfmax_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -500,7 +500,7 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index c51181fe133551..6875d336b0ed54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -19,7 +19,7 @@ declare <vscale x 1 x bfloat> @llvm.vp.maximum.nxv1bf16(<vscale x 1 x bfloat>, <
define <vscale x 1 x bfloat> @vfmax_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -66,7 +66,7 @@ declare <vscale x 2 x bfloat> @llvm.vp.maximum.nxv2bf16(<vscale x 2 x bfloat>, <
define <vscale x 2 x bfloat> @vfmax_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -113,7 +113,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.maximum.nxv4bf16(<vscale x 4 x bfloat>, <
define <vscale x 4 x bfloat> @vfmax_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -162,7 +162,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.maximum.nxv8bf16(<vscale x 8 x bfloat>, <
define <vscale x 8 x bfloat> @vfmax_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -217,7 +217,7 @@ define <vscale x 16 x bfloat> @vfmax_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
@@ -570,7 +570,7 @@ declare <vscale x 1 x half> @llvm.vp.maximum.nxv1f16(<vscale x 1 x half>, <vscal
define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -584,7 +584,7 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -642,7 +642,7 @@ declare <vscale x 2 x half> @llvm.vp.maximum.nxv2f16(<vscale x 2 x half>, <vscal
define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -656,7 +656,7 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -714,7 +714,7 @@ declare <vscale x 4 x half> @llvm.vp.maximum.nxv4f16(<vscale x 4 x half>, <vscal
define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -728,7 +728,7 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -788,7 +788,7 @@ declare <vscale x 8 x half> @llvm.vp.maximum.nxv8f16(<vscale x 8 x half>, <vscal
define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -804,7 +804,7 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -864,7 +864,7 @@ declare <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half>, <vs
define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmax_vv_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -886,7 +886,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
@@ -976,7 +976,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1293,7 +1293,7 @@ declare <vscale x 1 x float> @llvm.vp.maximum.nxv1f32(<vscale x 1 x float>, <vsc
define <vscale x 1 x float> @vfmax_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1328,7 +1328,7 @@ declare <vscale x 2 x float> @llvm.vp.maximum.nxv2f32(<vscale x 2 x float>, <vsc
define <vscale x 2 x float> @vfmax_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1363,7 +1363,7 @@ declare <vscale x 4 x float> @llvm.vp.maximum.nxv4f32(<vscale x 4 x float>, <vsc
define <vscale x 4 x float> @vfmax_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1400,7 +1400,7 @@ declare <vscale x 8 x float> @llvm.vp.maximum.nxv8f32(<vscale x 8 x float>, <vsc
define <vscale x 8 x float> @vfmax_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1437,7 +1437,7 @@ declare <vscale x 1 x double> @llvm.vp.maximum.nxv1f64(<vscale x 1 x double>, <v
define <vscale x 1 x double> @vfmax_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1472,7 +1472,7 @@ declare <vscale x 2 x double> @llvm.vp.maximum.nxv2f64(<vscale x 2 x double>, <v
define <vscale x 2 x double> @vfmax_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1509,7 +1509,7 @@ declare <vscale x 4 x double> @llvm.vp.maximum.nxv4f64(<vscale x 4 x double>, <v
define <vscale x 4 x double> @vfmax_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1552,7 +1552,7 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1605,7 +1605,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index 6145f0c1fc3a93..1b148353a377b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -153,7 +153,7 @@ define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFH-NEXT: slli a0, a0, 1
; ZVFH-NEXT: add a0, a0, a1
; ZVFH-NEXT: sub sp, sp, a0
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v24, v16
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -229,7 +229,7 @@ define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vs
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -500,7 +500,7 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 64df5dd4fa0745..209ee809feee4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -19,7 +19,7 @@ declare <vscale x 1 x bfloat> @llvm.vp.minimum.nxv1bf16(<vscale x 1 x bfloat>, <
define <vscale x 1 x bfloat> @vfmin_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -66,7 +66,7 @@ declare <vscale x 2 x bfloat> @llvm.vp.minimum.nxv2bf16(<vscale x 2 x bfloat>, <
define <vscale x 2 x bfloat> @vfmin_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v11, v8
@@ -113,7 +113,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.minimum.nxv4bf16(<vscale x 4 x bfloat>, <
define <vscale x 4 x bfloat> @vfmin_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -162,7 +162,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.minimum.nxv8bf16(<vscale x 8 x bfloat>, <
define <vscale x 8 x bfloat> @vfmin_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -217,7 +217,7 @@ define <vscale x 16 x bfloat> @vfmin_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
@@ -570,7 +570,7 @@ declare <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half>, <vscal
define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -584,7 +584,7 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -642,7 +642,7 @@ declare <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half>, <vscal
define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -656,7 +656,7 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
@@ -714,7 +714,7 @@ declare <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half>, <vscal
define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -728,7 +728,7 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -788,7 +788,7 @@ declare <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half>, <vscal
define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -804,7 +804,7 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -864,7 +864,7 @@ declare <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half>, <vs
define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -886,7 +886,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
@@ -976,7 +976,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1293,7 +1293,7 @@ declare <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float>, <vsc
define <vscale x 1 x float> @vfmin_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1328,7 +1328,7 @@ declare <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float>, <vsc
define <vscale x 2 x float> @vfmin_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1363,7 +1363,7 @@ declare <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float>, <vsc
define <vscale x 4 x float> @vfmin_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1400,7 +1400,7 @@ declare <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float>, <vsc
define <vscale x 8 x float> @vfmin_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1437,7 +1437,7 @@ declare <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double>, <v
define <vscale x 1 x double> @vfmin_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -1472,7 +1472,7 @@ declare <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double>, <v
define <vscale x 2 x double> @vfmin_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmfeq.vv v13, v8, v8, v0.t
@@ -1509,7 +1509,7 @@ declare <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double>, <v
define <vscale x 4 x double> @vfmin_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfeq.vv v17, v8, v8, v0.t
@@ -1552,7 +1552,7 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v8, v8, v0.t
@@ -1605,7 +1605,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
index 85eb82e2b09e50..882e85a9b69ea6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
@@ -18,7 +18,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-NEXT: .LBB0_1: # %for.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: th.lrb a0, a1, a0, 0
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v8
; RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV32-NEXT: vmv.s.x v9, a0
@@ -46,7 +46,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: th.lrb a0, a1, a0, 0
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v8
; RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; RV64-NEXT: vmv.s.x v9, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index 64c22eac537748..4c70fdbfb3d57d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -703,7 +703,7 @@ define <vscale x 16 x i32> @fshl_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: li a0, 31
@@ -883,7 +883,7 @@ define <vscale x 7 x i64> @fshl_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64>
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: li a0, 63
@@ -955,7 +955,7 @@ define <vscale x 8 x i64> @fshl_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64>
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: li a0, 63
@@ -991,7 +991,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
@@ -1177,7 +1177,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 5
diff --git a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
index 758c0beb4decc1..158b960f1431ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
@@ -365,13 +365,13 @@ entry:
define <vscale x 4 x i8> @test_specify_reg_mf2(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v9
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v1, v2
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -382,13 +382,13 @@ entry:
define <vscale x 8 x i8> @test_specify_reg_m1(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v9
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v1, v2
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -399,13 +399,13 @@ entry:
define <vscale x 16 x i8> @test_specify_reg_m2(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v4, v10
; CHECK-NEXT: vmv2r.v v2, v8
; CHECK-NEXT: #APP
; CHECK-NEXT: vadd.vv v0, v2, v4
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v0
; CHECK-NEXT: ret
entry:
@@ -416,7 +416,7 @@ entry:
define <vscale x 1 x i1> @test_specify_reg_mask(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mask:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v2, v8
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: #APP
diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index 68aefd71014c6f..053019c430ee81 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -5,7 +5,7 @@
define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
@@ -15,7 +15,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
@@ -25,7 +25,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
@@ -35,7 +35,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
@@ -45,7 +45,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
@@ -55,7 +55,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vs
define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
@@ -92,7 +92,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 0)
@@ -102,7 +102,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 8)
@@ -112,7 +112,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
@@ -122,7 +122,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
@@ -132,7 +132,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 8)
@@ -142,7 +142,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 12)
@@ -152,7 +152,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
@@ -162,7 +162,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
@@ -172,7 +172,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
@@ -182,7 +182,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_6:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
@@ -192,7 +192,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 8)
@@ -202,7 +202,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_10:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 10)
@@ -212,7 +212,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_12:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v14, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 12)
@@ -222,7 +222,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec,
define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_14:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v15, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 14)
@@ -532,7 +532,7 @@ define <vscale x 2 x i64> @insert_nxv2i64_nxv3i64(<3 x i64> %sv) #0 {
define <vscale x 8 x i32> @insert_insert_combine(<2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: ret
%inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v2i32(<vscale x 4 x i32> undef, <2 x i32> %subvec, i64 0)
@@ -545,7 +545,7 @@ define <vscale x 8 x i32> @insert_insert_combine(<2 x i32> %subvec) {
define <vscale x 8 x i32> @insert_insert_combine2(<vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: ret
%inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv2i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> %subvec, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
index a1b66771a5f692..19f1a6241ace54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
@@ -55,7 +55,7 @@ declare <vscale x 8 x i64> @llvm.vp.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>,
define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: llrint_nxv16i64_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
index 963886d278ba80..667afae4762d2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
@@ -117,7 +117,7 @@ define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x
;
; RV64-i64-LABEL: lrint_nxv16f32:
; RV64-i64: # %bb.0:
-; RV64-i64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-i64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-i64-NEXT: vmv1r.v v24, v0
; RV64-i64-NEXT: csrr a1, vlenb
; RV64-i64-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index 4888d7090a1345..9fb405c79cc3cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1288,7 +1288,7 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -1313,7 +1313,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -1445,7 +1445,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 4f75d693a3f22f..57fabbf4b81e7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -221,13 +221,13 @@ define <vscale x 4 x i8> @mgather_truemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vsc
define <vscale x 4 x i8> @mgather_falsemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i8> %passthru)
@@ -444,13 +444,13 @@ define <vscale x 4 x i16> @mgather_truemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i16> @mgather_falsemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i16> %passthru)
@@ -690,13 +690,13 @@ define <vscale x 4 x i32> @mgather_truemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i32> @mgather_falsemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %passthru)
@@ -955,7 +955,7 @@ define <vscale x 4 x i64> @mgather_truemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <v
define <vscale x 4 x i64> @mgather_falsemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i64> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i64> %passthru)
@@ -1239,7 +1239,7 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: addi a3, sp, 16
; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv8r.v v16, v8
; RV64-NEXT: vl8re64.v v24, (a0)
; RV64-NEXT: csrr a0, vlenb
@@ -1356,13 +1356,13 @@ define <vscale x 4 x bfloat> @mgather_truemask_nxv4bf16(<vscale x 4 x ptr> %ptrs
define <vscale x 4 x bfloat> @mgather_falsemask_nxv4bf16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x bfloat> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4bf16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4bf16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x bfloat> %passthru)
@@ -1559,13 +1559,13 @@ define <vscale x 4 x half> @mgather_truemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <
define <vscale x 4 x half> @mgather_falsemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x half> %passthru)
@@ -1761,13 +1761,13 @@ define <vscale x 4 x float> @mgather_truemask_nxv4f32(<vscale x 4 x ptr> %ptrs,
define <vscale x 4 x float> @mgather_falsemask_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> %passthru)
@@ -2026,7 +2026,7 @@ define <vscale x 4 x double> @mgather_truemask_nxv4f64(<vscale x 4 x ptr> %ptrs,
define <vscale x 4 x double> @mgather_falsemask_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x double> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x double> %passthru)
@@ -2332,7 +2332,7 @@ define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8>
;
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v16, v0
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 549a0d88bf3e72..b6c3077aa7e579 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -2009,7 +2009,7 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv8r.v v16, v8
; RV32-NEXT: vl4re16.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index 87fe5b25972051..f5df3d549843f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -1169,7 +1169,7 @@ define <vscale x 32 x i8> @reverse_nxv32i8(<vscale x 32 x i8> %a) {
define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV32-BITS-UNKNOWN: # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8
; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1
@@ -1189,7 +1189,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV32-BITS-256-LABEL: reverse_nxv64i8:
; RV32-BITS-256: # %bb.0:
-; RV32-BITS-256-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-BITS-256-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-BITS-256-NEXT: vmv8r.v v16, v8
; RV32-BITS-256-NEXT: csrr a0, vlenb
; RV32-BITS-256-NEXT: addi a0, a0, -1
@@ -1208,7 +1208,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV32-BITS-512-LABEL: reverse_nxv64i8:
; RV32-BITS-512: # %bb.0:
-; RV32-BITS-512-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-BITS-512-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-BITS-512-NEXT: vmv8r.v v16, v8
; RV32-BITS-512-NEXT: csrr a0, vlenb
; RV32-BITS-512-NEXT: addi a0, a0, -1
@@ -1227,7 +1227,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV64-BITS-UNKNOWN: # %bb.0:
-; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8
; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
@@ -1247,7 +1247,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-256-LABEL: reverse_nxv64i8:
; RV64-BITS-256: # %bb.0:
-; RV64-BITS-256-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-BITS-256-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-BITS-256-NEXT: vmv8r.v v16, v8
; RV64-BITS-256-NEXT: csrr a0, vlenb
; RV64-BITS-256-NEXT: addi a0, a0, -1
@@ -1266,7 +1266,7 @@ define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
;
; RV64-BITS-512-LABEL: reverse_nxv64i8:
; RV64-BITS-512: # %bb.0:
-; RV64-BITS-512-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-BITS-512-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-BITS-512-NEXT: vmv8r.v v16, v8
; RV64-BITS-512-NEXT: csrr a0, vlenb
; RV64-BITS-512-NEXT: addi a0, a0, -1
@@ -1373,7 +1373,7 @@ define <vscale x 16 x i16> @reverse_nxv16i16(<vscale x 16 x i16> %a) {
define <vscale x 32 x i16> @reverse_nxv32i16(<vscale x 32 x i16> %a) {
; CHECK-LABEL: reverse_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1465,7 +1465,7 @@ define <vscale x 8 x i32> @reverse_nxv8i32(<vscale x 8 x i32> %a) {
define <vscale x 16 x i32> @reverse_nxv16i32(<vscale x 16 x i32> %a) {
; CHECK-LABEL: reverse_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
@@ -1541,7 +1541,7 @@ define <vscale x 4 x i64> @reverse_nxv4i64(<vscale x 4 x i64> %a) {
define <vscale x 8 x i64> @reverse_nxv8i64(<vscale x 8 x i64> %a) {
; CHECK-LABEL: reverse_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
@@ -1653,7 +1653,7 @@ define <vscale x 16 x bfloat> @reverse_nxv16bf16(<vscale x 16 x bfloat> %a) {
define <vscale x 32 x bfloat> @reverse_nxv32bf16(<vscale x 32 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1761,7 +1761,7 @@ define <vscale x 16 x half> @reverse_nxv16f16(<vscale x 16 x half> %a) {
define <vscale x 32 x half> @reverse_nxv32f16(<vscale x 32 x half> %a) {
; CHECK-LABEL: reverse_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
@@ -1853,7 +1853,7 @@ define <vscale x 8 x float> @reverse_nxv8f32(<vscale x 8 x float> %a) {
define <vscale x 16 x float> @reverse_nxv16f32(<vscale x 16 x float> %a) {
; CHECK-LABEL: reverse_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
@@ -1929,7 +1929,7 @@ define <vscale x 4 x double> @reverse_nxv4f64(<vscale x 4 x double> %a) {
define <vscale x 8 x double> @reverse_nxv8f64(<vscale x 8 x double> %a) {
; CHECK-LABEL: reverse_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 438d4cdf7d197b..21f76c2d7881ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.nearbyint.nxv4bf16(<vscale x 4 x bfloat>,
define <vscale x 4 x bfloat> @vp_nearbyint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.nearbyint.nxv8bf16(<vscale x 8 x bfloat>,
define <vscale x 8 x bfloat> @vp_nearbyint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.nearbyint.nxv16bf16(<vscale x 16 x bfloa
define <vscale x 16 x bfloat> @vp_nearbyint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -276,7 +276,7 @@ declare <vscale x 32 x bfloat> @llvm.vp.nearbyint.nxv32bf16(<vscale x 32 x bfloa
define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -570,7 +570,7 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -638,7 +638,7 @@ declare <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -658,7 +658,7 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -726,7 +726,7 @@ declare <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -746,7 +746,7 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -814,7 +814,7 @@ declare <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -834,7 +834,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_nearbyint_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1047,7 +1047,7 @@ declare <vscale x 4 x float> @llvm.vp.nearbyint.nxv4f32(<vscale x 4 x float>, <v
define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1092,7 +1092,7 @@ declare <vscale x 8 x float> @llvm.vp.nearbyint.nxv8f32(<vscale x 8 x float>, <v
define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1137,7 +1137,7 @@ declare <vscale x 16 x float> @llvm.vp.nearbyint.nxv16f32(<vscale x 16 x float>,
define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1224,7 +1224,7 @@ declare <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1269,7 +1269,7 @@ declare <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1314,7 +1314,7 @@ declare <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1359,7 +1359,7 @@ declare <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1405,7 +1405,7 @@ declare <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double
define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
index 082a8e8d714c6a..883cecfa2d635a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr88576.ll
@@ -23,7 +23,7 @@ define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: andi sp, sp, -64
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: addi a2, sp, 64
; CHECK-NEXT: slli a1, a1, 3
@@ -54,7 +54,7 @@ define i1 @foo(<vscale x 16 x i8> %x, i64 %y) {
define i8 @bar(<vscale x 128 x i1> %x, i64 %y) {
; CHECK-LABEL: bar:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v0, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index c23b71789311d3..a6080a4a75055b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -109,7 +109,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.rint.nxv4bf16(<vscale x 4 x bfloat>, <vsc
define <vscale x 4 x bfloat> @vp_rint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -158,7 +158,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.rint.nxv8bf16(<vscale x 8 x bfloat>, <vsc
define <vscale x 8 x bfloat> @vp_rint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -207,7 +207,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.rint.nxv16bf16(<vscale x 16 x bfloat>, <
define <vscale x 16 x bfloat> @vp_rint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -262,7 +262,7 @@ define <vscale x 32 x bfloat> @vp_rint_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -539,7 +539,7 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
;
; ZVFHMIN-LABEL: vp_rint_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -601,7 +601,7 @@ declare <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -619,7 +619,7 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
;
; ZVFHMIN-LABEL: vp_rint_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -681,7 +681,7 @@ declare <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -699,7 +699,7 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_rint_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -761,7 +761,7 @@ declare <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -785,7 +785,7 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -989,7 +989,7 @@ declare <vscale x 4 x float> @llvm.vp.rint.nxv4f32(<vscale x 4 x float>, <vscale
define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1030,7 +1030,7 @@ declare <vscale x 8 x float> @llvm.vp.rint.nxv8f32(<vscale x 8 x float>, <vscale
define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1071,7 +1071,7 @@ declare <vscale x 16 x float> @llvm.vp.rint.nxv16f32(<vscale x 16 x float>, <vsc
define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1150,7 +1150,7 @@ declare <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1191,7 +1191,7 @@ declare <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1232,7 +1232,7 @@ declare <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1273,7 +1273,7 @@ declare <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1321,7 +1321,7 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index 5898d1dc4dca27..e4415e0512f729 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.round.nxv4bf16(<vscale x 4 x bfloat>, <vs
define <vscale x 4 x bfloat> @vp_round_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.round.nxv8bf16(<vscale x 8 x bfloat>, <vs
define <vscale x 8 x bfloat> @vp_round_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.round.nxv16bf16(<vscale x 16 x bfloat>,
define <vscale x 16 x bfloat> @vp_round_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_round_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vp_round_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vp_round_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.round.nxv4f32(<vscale x 4 x float>, <vscal
define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.round.nxv8f32(<vscale x 8 x float>, <vscal
define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.round.nxv16f32(<vscale x 16 x float>, <vs
define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 9e86f3b873ff6a..01a028af4dde75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.roundeven.nxv4bf16(<vscale x 4 x bfloat>,
define <vscale x 4 x bfloat> @vp_roundeven_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.roundeven.nxv8bf16(<vscale x 8 x bfloat>,
define <vscale x 8 x bfloat> @vp_roundeven_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.roundeven.nxv16bf16(<vscale x 16 x bfloa
define <vscale x 16 x bfloat> @vp_roundeven_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16(<vscale x 32 x bfloat> %va
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_roundeven_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vp_roundeven_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
;
; ZVFHMIN-LABEL: vp_roundeven_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float>, <v
define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float>, <v
define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float>,
define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index c303ffc871461e..b8602ad138a510 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -117,7 +117,7 @@ declare <vscale x 4 x bfloat> @llvm.vp.roundtozero.nxv4bf16(<vscale x 4 x bfloat
define <vscale x 4 x bfloat> @vp_roundtozero_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
@@ -170,7 +170,7 @@ declare <vscale x 8 x bfloat> @llvm.vp.roundtozero.nxv8bf16(<vscale x 8 x bfloat
define <vscale x 8 x bfloat> @vp_roundtozero_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
@@ -223,7 +223,7 @@ declare <vscale x 16 x bfloat> @llvm.vp.roundtozero.nxv16bf16(<vscale x 16 x bfl
define <vscale x 16 x bfloat> @vp_roundtozero_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv16bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
@@ -282,7 +282,7 @@ define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16(<vscale x 32 x bfloat> %
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -586,7 +586,7 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v9, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
@@ -654,7 +654,7 @@ declare <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half>, <v
define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI18_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a1)
@@ -674,7 +674,7 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
@@ -742,7 +742,7 @@ declare <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half>,
define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI20_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a1)
@@ -762,7 +762,7 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
;
; ZVFHMIN-LABEL: vp_roundtozero_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
@@ -830,7 +830,7 @@ declare <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half>,
define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI22_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a1)
@@ -856,7 +856,7 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
@@ -1079,7 +1079,7 @@ declare <vscale x 4 x float> @llvm.vp.roundtozero.nxv4f32(<vscale x 4 x float>,
define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
@@ -1124,7 +1124,7 @@ declare <vscale x 8 x float> @llvm.vp.roundtozero.nxv8f32(<vscale x 8 x float>,
define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
@@ -1169,7 +1169,7 @@ declare <vscale x 16 x float> @llvm.vp.roundtozero.nxv16f32(<vscale x 16 x float
define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
@@ -1256,7 +1256,7 @@ declare <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double>
define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI36_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a1)
@@ -1301,7 +1301,7 @@ declare <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double>
define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI38_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a1)
@@ -1346,7 +1346,7 @@ declare <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double>
define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI40_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a1)
@@ -1391,7 +1391,7 @@ declare <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double>
define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI42_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a1)
@@ -1443,7 +1443,7 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index a88d5f1bec2468..2d1f419b8dfd97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: slli a1, a1, 1
; SPILL-O0-NEXT: sub sp, sp, a1
; SPILL-O0-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
-; SPILL-O0-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; SPILL-O0-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; SPILL-O0-NEXT: vmv1r.v v10, v9
; SPILL-O0-NEXT: vmv1r.v v9, v8
; SPILL-O0-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index 9b01acad94aeb2..c411490b67d07f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -20,7 +20,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: slli a1, a1, 1
; SPILL-O0-NEXT: sub sp, sp, a1
; SPILL-O0-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; SPILL-O0-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; SPILL-O0-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; SPILL-O0-NEXT: vmv1r.v v10, v9
; SPILL-O0-NEXT: vmv1r.v v9, v8
; SPILL-O0-NEXT: csrr a1, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
index 381d1c61b99862..6eaa1066cb7734 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -47,7 +47,7 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
; CHECK-NEXT: vs8r.v v8, (t1)
; CHECK-NEXT: sd t1, 0(sp)
; CHECK-NEXT: sd t0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: call bar
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index fcf2d543a070fd..af210181906e00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -941,7 +941,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
define <vscale x 2 x i32> @vredsum(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl) {
; CHECK-LABEL: vredsum:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vredsum.vs v11, v9, v10
@@ -966,7 +966,7 @@ define <vscale x 2 x float> @vfredusum(<vscale x 2 x float> %passthru, <vscale x
; CHECK-LABEL: vfredusum:
; CHECK: # %bb.0:
; CHECK-NEXT: fsrmi a1, 0
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vfredusum.vs v11, v9, v10
@@ -1018,7 +1018,7 @@ define <vscale x 2 x float> @vfredusum_allones_mask(<vscale x 2 x float> %passth
define <vscale x 2 x i32> @unfoldable_vredsum_allones_mask_diff_vl(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: unfoldable_vredsum_allones_mask_diff_vl:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT: vredsum.vs v11, v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 1854ddbb2edd9b..9accfdffb7ba5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -1473,7 +1473,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vs
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: mv a3, a1
@@ -3721,7 +3721,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: slli a1, a1, 4
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: slli a1, a1, 3
@@ -3783,7 +3783,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a1, a1, a3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v24, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: mv a3, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 52c6d54c1675c0..4c9e59c49b01a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1092,7 +1092,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -1144,7 +1144,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -1175,7 +1175,7 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -2247,7 +2247,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -2303,7 +2303,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: srli a2, a3, 2
@@ -2337,7 +2337,7 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: srli a2, a3, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index a2de335e103461..84cc15379cad20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -4865,7 +4865,7 @@ declare <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32>, <4 x i32>, metadata, <4 x i1>, i
define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_icmp:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
@@ -4907,7 +4907,7 @@ declare <4 x i1> @llvm.vp.fcmp.v4f32(<4 x float>, <4 x float>, metadata, <4 x i1
define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fcmp:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index 0c1e9c8025de36..114f0edd971fdf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -663,7 +663,7 @@ declare <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(
define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_nxv16f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v9, v0
; CHECK-RV32-NEXT: csrr a4, vlenb
; CHECK-RV32-NEXT: sub a2, a3, a4
@@ -689,7 +689,7 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
;
; CHECK-RV64-LABEL: strided_load_nxv16f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v9, v0
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: sub a3, a2, a4
@@ -767,7 +767,7 @@ declare <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i6
define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 zeroext %evl, ptr %hi_ptr) {
; CHECK-RV32-LABEL: strided_load_nxv17f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: csrr a2, vlenb
; CHECK-RV32-NEXT: slli a7, a2, 1
@@ -815,7 +815,7 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
;
; CHECK-RV64-LABEL: strided_load_nxv17f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: slli a7, a4, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index e1e79cd6061f46..ffb73d4827c709 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -615,7 +615,7 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: slli a4, a4, 3
; CHECK-NEXT: sub sp, sp, a4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a4, sp, 16
; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
@@ -631,7 +631,7 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a7, a4
; CHECK-NEXT: .LBB48_4:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
index c428b2d1249f58..4af0ae6d8fbfdd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -158,7 +158,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i
define void @repeat_shuffle(<2 x double> %v, ptr noalias %q) {
; CHECK-LABEL: repeat_shuffle:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vslideup.vi v10, v8, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 42b2896d9f7998..2403a4e4286683 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -565,7 +565,7 @@ declare <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8>, <vscale x
define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1344,7 +1344,7 @@ declare <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32>, <vscale x
define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -1401,7 +1401,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a2, a0, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
index ade86910076084..98be9750206a5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
@@ -43,7 +43,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -98,7 +98,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -139,7 +139,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -180,7 +180,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -221,7 +221,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -262,7 +262,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -303,7 +303,7 @@ declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1(
define iXLen @intrinsic_vcpop_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index 9f2b2466385eae..7766ec72c4c381 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -7,7 +7,7 @@
define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
; CHECK-LABEL: vector_deinterleave_v16i1_v32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index c24a67743eb842..aa58db2e7bfcf7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -131,7 +131,7 @@ ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
define {<vscale x 64 x i8>, <vscale x 64 x i8>} @vector_deinterleave_nxv64i8_nxv128i8(<vscale x 128 x i8> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv64i8_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -147,7 +147,7 @@ ret {<vscale x 64 x i8>, <vscale x 64 x i8>} %retval
define {<vscale x 32 x i16>, <vscale x 32 x i16>} @vector_deinterleave_nxv32i16_nxv64i16(<vscale x 64 x i16> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32i16_nxv64i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -163,7 +163,7 @@ ret {<vscale x 32 x i16>, <vscale x 32 x i16>} %retval
define {<vscale x 16 x i32>, <vscale x 16 x i32>} @vector_deinterleave_nxv16i32_nxvv32i32(<vscale x 32 x i32> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv16i32_nxvv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -192,7 +192,7 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
@@ -391,7 +391,7 @@ declare {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave
define {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} @vector_deinterleave_nxv32bf16_nxv64bf16(<vscale x 64 x bfloat> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32bf16_nxv64bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -407,7 +407,7 @@ ret {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} %retval
define {<vscale x 32 x half>, <vscale x 32 x half>} @vector_deinterleave_nxv32f16_nxv64f16(<vscale x 64 x half> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv32f16_nxv64f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v24, 0
@@ -423,7 +423,7 @@ ret {<vscale x 32 x half>, <vscale x 32 x half>} %retval
define {<vscale x 16 x float>, <vscale x 16 x float>} @vector_deinterleave_nxv16f32_nxv32f32(<vscale x 32 x float> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv16f32_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -452,7 +452,7 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index 4f57d5f5868dc5..6dfb82dea10104 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -91,7 +91,7 @@ define <8 x i32> @vector_interleave_v8i32_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: vector_interleave_v4i64_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: lui a0, 12304
; CHECK-NEXT: addi a0, a0, 512
@@ -107,7 +107,7 @@ define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; ZVBB-LABEL: vector_interleave_v4i64_v2i64:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v10, v9
; ZVBB-NEXT: lui a0, 12304
; ZVBB-NEXT: addi a0, a0, 512
@@ -241,7 +241,7 @@ define <8 x float> @vector_interleave_v8f32_v4f32(<4 x float> %a, <4 x float> %b
define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: vector_interleave_v4f64_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: lui a0, 12304
; CHECK-NEXT: addi a0, a0, 512
@@ -257,7 +257,7 @@ define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double>
;
; ZVBB-LABEL: vector_interleave_v4f64_v2f64:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v10, v9
; ZVBB-NEXT: lui a0, 12304
; ZVBB-NEXT: addi a0, a0, 512
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index e5271da6cea130..a435308e431e91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -9,7 +9,7 @@
define void @vector_interleave_store_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_nxv32i1_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 56893aa8111fbc..574c6ce9b7c700 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -11,7 +11,7 @@
define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -33,7 +33,7 @@ define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1>
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv1r.v v9, v0
; ZVBB-NEXT: vmv1r.v v0, v8
; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, mu
@@ -162,7 +162,7 @@ declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>,
define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
@@ -206,7 +206,7 @@ define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1
define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -219,7 +219,7 @@ define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 8
@@ -235,7 +235,7 @@ define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8
define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -248,7 +248,7 @@ define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -264,7 +264,7 @@ define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i
define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -277,7 +277,7 @@ define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
@@ -584,7 +584,7 @@ declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x dou
define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
; CHECK-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -597,7 +597,7 @@ define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 3
;
; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -613,7 +613,7 @@ define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 3
define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -626,7 +626,7 @@ define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x
;
; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT: vwsll.vi v8, v16, 16
@@ -642,7 +642,7 @@ define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x
define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vwaddu.vv v8, v24, v16
@@ -655,7 +655,7 @@ define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVBB-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
index 33bb3e89cb90ab..7a82db9875c7ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -120,7 +120,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
@@ -153,7 +153,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru_negative:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
@@ -185,7 +185,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
; CHECK-LABEL: vadd_vv_mask:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
@@ -221,7 +221,7 @@ entry:
define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m, <vscale x 1 x i1> %m2) nounwind {
; CHECK-LABEL: vadd_vv_mask_negative:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v11, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index 89c4e1a112d777..6d8fddb4a39d99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -11,7 +11,7 @@ declare <vscale x 1 x i1> @llvm.vector.splice.nxv1i1(<vscale x 1 x i1>, <vscale
define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv1i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
@@ -34,7 +34,7 @@ define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vsc
define <vscale x 1 x i1> @splice_nxv1i1_offset_max(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv1i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
@@ -61,7 +61,7 @@ declare <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale
define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
@@ -84,7 +84,7 @@ define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vsc
define <vscale x 2 x i1> @splice_nxv2i1_offset_max(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
@@ -111,7 +111,7 @@ declare <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale
define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
@@ -134,7 +134,7 @@ define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vsc
define <vscale x 4 x i1> @splice_nxv4i1_offset_max(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
@@ -161,7 +161,7 @@ declare <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale
define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
@@ -183,7 +183,7 @@ define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vsc
define <vscale x 8 x i1> @splice_nxv8i1_offset_max(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
@@ -209,7 +209,7 @@ declare <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1>, <vsca
define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -232,7 +232,7 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <
define <vscale x 16 x i1> @splice_nxv16i1_offset_max(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_offset_max:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
@@ -259,7 +259,7 @@ declare <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1>, <vsca
define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv32i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
@@ -307,7 +307,7 @@ declare <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1>, <vsca
define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv64i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index f094d4ee175c38..4362e1e16c5841 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -462,7 +462,7 @@ declare <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 5721737be3f3ed..d61b7580c681bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -411,7 +411,7 @@ define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -519,7 +519,7 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -606,7 +606,7 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1208,7 +1208,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1328,7 +1328,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1421,7 +1421,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index efde5b0b3a923f..f3608a7aa4be31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -373,7 +373,7 @@ define <vscale x 32 x bfloat> @vfdiv_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -481,7 +481,7 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -568,7 +568,7 @@ define <vscale x 32 x bfloat> @vfdiv_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1120,7 +1120,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1240,7 +1240,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1333,7 +1333,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
index e803a8416fba8b..1adb003932dc67 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
@@ -43,7 +43,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1(
define iXLen @intrinsic_vfirst_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -98,7 +98,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1(
define iXLen @intrinsic_vfirst_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -139,7 +139,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1(
define iXLen @intrinsic_vfirst_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -180,7 +180,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1(
define iXLen @intrinsic_vfirst_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -221,7 +221,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1(
define iXLen @intrinsic_vfirst_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -262,7 +262,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1(
define iXLen @intrinsic_vfirst_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -303,7 +303,7 @@ declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1(
define iXLen @intrinsic_vfirst_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index b1652005285a06..0dcf870f126f0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -628,7 +628,7 @@ define <vscale x 32 x bfloat> @vfma_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
; CHECK-NEXT: add a2, a2, a3
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vl8re16.v v0, (a0)
; CHECK-NEXT: csrr a2, vlenb
@@ -2194,7 +2194,7 @@ define <vscale x 32 x half> @vfma_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v24, v0
; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -3664,7 +3664,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -7799,7 +7799,7 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vfnmadd_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -8156,7 +8156,7 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_commute(<vscale x 16
;
; ZVFHMIN-LABEL: vfnmadd_vf_nxv16f16_neg_splat_commute:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -8258,7 +8258,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -8559,7 +8559,7 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat(<vscale x 16 x half>
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv4r.v v4, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -8719,7 +8719,7 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
; ZVFHMIN-NEXT: lui a2, 8
@@ -9285,7 +9285,7 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a1, 8
@@ -10010,7 +10010,7 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
@@ -11164,7 +11164,7 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
@@ -11777,7 +11777,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: sub sp, sp, a2
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x29, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 41 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
@@ -11936,7 +11936,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half
; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a3, 8
@@ -12088,7 +12088,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: fmv.x.h a2, fa0
; ZVFHMIN-NEXT: lui a3, 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index 949614ef2b3b2e..c7c3cb96770ab5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -227,7 +227,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v0, v16
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -315,7 +315,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -666,7 +666,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -760,7 +760,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 0374f0c9dbe327..d31a6c7993a7d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -226,7 +226,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFH-NEXT: slli a1, a1, 5
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v0, v16
; ZVFH-NEXT: addi a1, sp, 16
; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -317,7 +317,7 @@ define <vscale x 32 x bfloat> @vfmadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -404,7 +404,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFH-NEXT: slli a0, a0, 5
; ZVFH-NEXT: sub sp, sp, a0
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv8r.v v0, v16
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
@@ -501,7 +501,7 @@ define <vscale x 32 x bfloat> @vfmadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, <
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
@@ -879,7 +879,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -972,7 +972,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 6ac178ef5bfa39..fb247a8e08b61a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -183,7 +183,7 @@ define <vscale x 32 x bfloat> @vfmax_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -517,7 +517,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index 303c6a2ad173cb..92ce1f114db4d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -183,7 +183,7 @@ define <vscale x 32 x bfloat> @vfmin_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -517,7 +517,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 98fbc041fd4a57..462fdd4e8fdf04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -495,7 +495,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -615,7 +615,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -708,7 +708,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 610161b33365ef..69ed85267c7ce8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1112,7 +1112,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index c70dfaa8de38b4..cdb9d8edd676ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -450,7 +450,7 @@ declare <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index 72bec44cc06483..e6b3b92db0a1ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -329,7 +329,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: li a2, 24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index 63cbfb32a2aa14..c91797f6764cac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -310,7 +310,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 4
@@ -390,7 +390,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
index a90fe931a190d6..02390cb8d878e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -96,7 +96,7 @@ declare <vscale x 32 x float> @llvm.vp.fpext.nxv32f32.nxv32f16(<vscale x 32 x ha
define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_nxv32f16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index c7ae8c50b46dd9..ba653a844ccfeb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -539,7 +539,7 @@ declare <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i32_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index 125f18094b3476..0cdc95052c4ad2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -508,7 +508,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -539,7 +539,7 @@ declare <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i32_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 2bd2923339e34b..dca522308fc50c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -102,7 +102,7 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -148,7 +148,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index 0036839f7434a2..699fa26d885294 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -167,7 +167,7 @@ declare <vscale x 32 x bfloat> @llvm.vp.sqrt.nxv32bf16(<vscale x 32 x bfloat>, <
define <vscale x 32 x bfloat> @vfsqrt_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_nxv32bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a1, a2, 1
@@ -453,7 +453,7 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
@@ -751,7 +751,7 @@ declare <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double>, <v
define <vscale x 16 x double> @vfsqrt_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index e4ac9130c46357..b4ec48d5f70515 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -373,7 +373,7 @@ define <vscale x 32 x bfloat> @vfsub_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <v
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: addi a1, sp, 16
@@ -481,7 +481,7 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -568,7 +568,7 @@ define <vscale x 32 x bfloat> @vfsub_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: fmv.x.h a1, fa0
; CHECK-NEXT: csrr a2, vlenb
@@ -1120,7 +1120,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: addi a1, sp, 16
@@ -1240,7 +1240,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
@@ -1333,7 +1333,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
index c00ac63ace8b52..bcf619eb0bb74c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
@@ -111,7 +111,7 @@ define <vscale x 4 x i32> @different_vl_with_ta(<vscale x 4 x i32> %a, <vscale x
define <vscale x 4 x i32> @different_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_vl_with_tu:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
@@ -127,7 +127,7 @@ define <vscale x 4 x i32> @different_vl_with_tu(<vscale x 4 x i32> %passthru, <v
define <vscale x 4 x i32> @different_imm_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_imm_vl_with_tu:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v10
; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
index 389d1f3ed81e47..605c5cce6731be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
@@ -51,7 +51,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
index 889d7600e0d220..445e3498bc1443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
@@ -25,7 +25,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -65,7 +65,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -105,7 +105,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -145,7 +145,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -185,7 +185,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -225,7 +225,7 @@ entry:
define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 32 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -265,7 +265,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -306,7 +306,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -347,7 +347,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -388,7 +388,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -429,7 +429,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -470,7 +470,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -512,7 +512,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -554,7 +554,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -596,7 +596,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -638,7 +638,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -680,7 +680,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -723,7 +723,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -766,7 +766,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -809,7 +809,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -852,7 +852,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -896,7 +896,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -940,7 +940,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -984,7 +984,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1028,7 +1028,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1073,7 +1073,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1118,7 +1118,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1163,7 +1163,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1208,7 +1208,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1254,7 +1254,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1300,7 +1300,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1346,7 +1346,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1391,7 +1391,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1430,7 +1430,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1469,7 +1469,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1508,7 +1508,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1547,7 +1547,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1586,7 +1586,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1626,7 +1626,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1666,7 +1666,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1706,7 +1706,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1746,7 +1746,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1787,7 +1787,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1828,7 +1828,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1869,7 +1869,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1910,7 +1910,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1952,7 +1952,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1994,7 +1994,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2036,7 +2036,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2079,7 +2079,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2122,7 +2122,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2165,7 +2165,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2209,7 +2209,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2253,7 +2253,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2297,7 +2297,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2342,7 +2342,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2387,7 +2387,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2432,7 +2432,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -2471,7 +2471,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -2510,7 +2510,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -2549,7 +2549,7 @@ entry:
define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -2588,7 +2588,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2628,7 +2628,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2668,7 +2668,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2708,7 +2708,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2749,7 +2749,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2790,7 +2790,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2831,7 +2831,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2873,7 +2873,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2915,7 +2915,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2958,7 +2958,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3001,7 +3001,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3045,7 +3045,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3089,7 +3089,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3134,7 +3134,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3179,7 +3179,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -3218,7 +3218,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -3257,7 +3257,7 @@ entry:
define <vscale x 4 x i64> @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -3296,7 +3296,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3336,7 +3336,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3376,7 +3376,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3417,7 +3417,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3458,7 +3458,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3500,7 +3500,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3543,7 +3543,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3587,7 +3587,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3631,7 +3631,7 @@ entry:
define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -3669,7 +3669,7 @@ entry:
define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -3707,7 +3707,7 @@ entry:
define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -3745,7 +3745,7 @@ entry:
define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -3783,7 +3783,7 @@ entry:
define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -3821,7 +3821,7 @@ entry:
define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3860,7 +3860,7 @@ entry:
define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3899,7 +3899,7 @@ entry:
define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3938,7 +3938,7 @@ entry:
define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3977,7 +3977,7 @@ entry:
define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4017,7 +4017,7 @@ entry:
define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4057,7 +4057,7 @@ entry:
define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4097,7 +4097,7 @@ entry:
define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4137,7 +4137,7 @@ entry:
define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4178,7 +4178,7 @@ entry:
define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4219,7 +4219,7 @@ entry:
define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4260,7 +4260,7 @@ entry:
define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4302,7 +4302,7 @@ entry:
define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4344,7 +4344,7 @@ entry:
define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4386,7 +4386,7 @@ entry:
define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4429,7 +4429,7 @@ entry:
define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4472,7 +4472,7 @@ entry:
define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4515,7 +4515,7 @@ entry:
define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4559,7 +4559,7 @@ entry:
define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4603,7 +4603,7 @@ entry:
define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4647,7 +4647,7 @@ entry:
define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -4685,7 +4685,7 @@ entry:
define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -4723,7 +4723,7 @@ entry:
define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -4761,7 +4761,7 @@ entry:
define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -4799,7 +4799,7 @@ entry:
define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4838,7 +4838,7 @@ entry:
define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4877,7 +4877,7 @@ entry:
define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4916,7 +4916,7 @@ entry:
define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4956,7 +4956,7 @@ entry:
define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4996,7 +4996,7 @@ entry:
define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5036,7 +5036,7 @@ entry:
define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5077,7 +5077,7 @@ entry:
define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5118,7 +5118,7 @@ entry:
define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5160,7 +5160,7 @@ entry:
define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5202,7 +5202,7 @@ entry:
define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5245,7 +5245,7 @@ entry:
define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5288,7 +5288,7 @@ entry:
define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5332,7 +5332,7 @@ entry:
define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5376,7 +5376,7 @@ entry:
define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -5414,7 +5414,7 @@ entry:
define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -5452,7 +5452,7 @@ entry:
define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -5490,7 +5490,7 @@ entry:
define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5529,7 +5529,7 @@ entry:
define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5568,7 +5568,7 @@ entry:
define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5608,7 +5608,7 @@ entry:
define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5648,7 +5648,7 @@ entry:
define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5689,7 +5689,7 @@ entry:
define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5731,7 +5731,7 @@ entry:
define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5774,7 +5774,7 @@ entry:
define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5818,7 +5818,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -5856,7 +5856,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -5894,7 +5894,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -5932,7 +5932,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -5970,7 +5970,7 @@ entry:
define <vscale x 16 x bfloat> @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -6008,7 +6008,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6047,7 +6047,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6086,7 +6086,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6125,7 +6125,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6164,7 +6164,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6204,7 +6204,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6244,7 +6244,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6284,7 +6284,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6324,7 +6324,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6365,7 +6365,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6406,7 +6406,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6447,7 +6447,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6489,7 +6489,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6531,7 +6531,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6573,7 +6573,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6616,7 +6616,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6659,7 +6659,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6702,7 +6702,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6746,7 +6746,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6790,7 +6790,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
index b87602e592f2be..62322b9d364d27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
@@ -51,7 +51,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_vl:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
index ec322c04d1ca06..611b41767e7ae3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
@@ -25,7 +25,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -65,7 +65,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -105,7 +105,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -145,7 +145,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -185,7 +185,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -225,7 +225,7 @@ entry:
define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 32 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -265,7 +265,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -306,7 +306,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -347,7 +347,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -388,7 +388,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -429,7 +429,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -470,7 +470,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -512,7 +512,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -554,7 +554,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -596,7 +596,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -638,7 +638,7 @@ entry:
define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -680,7 +680,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -723,7 +723,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -766,7 +766,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -809,7 +809,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -852,7 +852,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -896,7 +896,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -940,7 +940,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -984,7 +984,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1028,7 +1028,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1073,7 +1073,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1118,7 +1118,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1163,7 +1163,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1208,7 +1208,7 @@ entry:
define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1254,7 +1254,7 @@ entry:
define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1300,7 +1300,7 @@ entry:
define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1346,7 +1346,7 @@ entry:
define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1391,7 +1391,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1430,7 +1430,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1469,7 +1469,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1508,7 +1508,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1547,7 +1547,7 @@ entry:
define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1586,7 +1586,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1626,7 +1626,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1666,7 +1666,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1706,7 +1706,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1746,7 +1746,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1787,7 +1787,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1828,7 +1828,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1869,7 +1869,7 @@ entry:
define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -1910,7 +1910,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1952,7 +1952,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -1994,7 +1994,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2036,7 +2036,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2079,7 +2079,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2122,7 +2122,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2165,7 +2165,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2209,7 +2209,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2253,7 +2253,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2297,7 +2297,7 @@ entry:
define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2342,7 +2342,7 @@ entry:
define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2387,7 +2387,7 @@ entry:
define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2432,7 +2432,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -2471,7 +2471,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -2510,7 +2510,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -2549,7 +2549,7 @@ entry:
define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -2588,7 +2588,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2628,7 +2628,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2668,7 +2668,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2708,7 +2708,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2749,7 +2749,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2790,7 +2790,7 @@ entry:
define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -2831,7 +2831,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2873,7 +2873,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2915,7 +2915,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -2958,7 +2958,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3001,7 +3001,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3045,7 +3045,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3089,7 +3089,7 @@ entry:
define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3134,7 +3134,7 @@ entry:
define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3179,7 +3179,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -3218,7 +3218,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -3257,7 +3257,7 @@ entry:
define <vscale x 4 x i64> @test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -3296,7 +3296,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3336,7 +3336,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3376,7 +3376,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3417,7 +3417,7 @@ entry:
define <vscale x 2 x i64> @test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3458,7 +3458,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3500,7 +3500,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3543,7 +3543,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3587,7 +3587,7 @@ entry:
define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3631,7 +3631,7 @@ entry:
define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -3669,7 +3669,7 @@ entry:
define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -3707,7 +3707,7 @@ entry:
define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -3745,7 +3745,7 @@ entry:
define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -3783,7 +3783,7 @@ entry:
define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -3821,7 +3821,7 @@ entry:
define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3860,7 +3860,7 @@ entry:
define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3899,7 +3899,7 @@ entry:
define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -3938,7 +3938,7 @@ entry:
define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -3977,7 +3977,7 @@ entry:
define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4017,7 +4017,7 @@ entry:
define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4057,7 +4057,7 @@ entry:
define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4097,7 +4097,7 @@ entry:
define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4137,7 +4137,7 @@ entry:
define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4178,7 +4178,7 @@ entry:
define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4219,7 +4219,7 @@ entry:
define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4260,7 +4260,7 @@ entry:
define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4302,7 +4302,7 @@ entry:
define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4344,7 +4344,7 @@ entry:
define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4386,7 +4386,7 @@ entry:
define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4429,7 +4429,7 @@ entry:
define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4472,7 +4472,7 @@ entry:
define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4515,7 +4515,7 @@ entry:
define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4559,7 +4559,7 @@ entry:
define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4603,7 +4603,7 @@ entry:
define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4647,7 +4647,7 @@ entry:
define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -4685,7 +4685,7 @@ entry:
define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -4723,7 +4723,7 @@ entry:
define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -4761,7 +4761,7 @@ entry:
define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -4799,7 +4799,7 @@ entry:
define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4838,7 +4838,7 @@ entry:
define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4877,7 +4877,7 @@ entry:
define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -4916,7 +4916,7 @@ entry:
define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4956,7 +4956,7 @@ entry:
define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -4996,7 +4996,7 @@ entry:
define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5036,7 +5036,7 @@ entry:
define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5077,7 +5077,7 @@ entry:
define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5118,7 +5118,7 @@ entry:
define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5160,7 +5160,7 @@ entry:
define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5202,7 +5202,7 @@ entry:
define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5245,7 +5245,7 @@ entry:
define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5288,7 +5288,7 @@ entry:
define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5332,7 +5332,7 @@ entry:
define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5376,7 +5376,7 @@ entry:
define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -5414,7 +5414,7 @@ entry:
define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -5452,7 +5452,7 @@ entry:
define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -5490,7 +5490,7 @@ entry:
define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5529,7 +5529,7 @@ entry:
define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5568,7 +5568,7 @@ entry:
define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5608,7 +5608,7 @@ entry:
define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -5648,7 +5648,7 @@ entry:
define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5689,7 +5689,7 @@ entry:
define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5731,7 +5731,7 @@ entry:
define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5774,7 +5774,7 @@ entry:
define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -5818,7 +5818,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -5856,7 +5856,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -5894,7 +5894,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -5932,7 +5932,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -5970,7 +5970,7 @@ entry:
define <vscale x 16 x bfloat> @test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -6008,7 +6008,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6047,7 +6047,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6086,7 +6086,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6125,7 +6125,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg3ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6164,7 +6164,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6204,7 +6204,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6244,7 +6244,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6284,7 +6284,7 @@ entry:
define <vscale x 8 x bfloat> @test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg4ff_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: vmv2r.v v10, v12
@@ -6324,7 +6324,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6365,7 +6365,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6406,7 +6406,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg5ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6447,7 +6447,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6489,7 +6489,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6531,7 +6531,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg6ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6573,7 +6573,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6616,7 +6616,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6659,7 +6659,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg7ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6702,7 +6702,7 @@ entry:
define <vscale x 1 x bfloat> @test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6746,7 +6746,7 @@ entry:
define <vscale x 2 x bfloat> @test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
@@ -6790,7 +6790,7 @@ entry:
define <vscale x 4 x bfloat> @test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
; CHECK-LABEL: test_vlseg8ff_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: vmv1r.v v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index a9a6521aaa9df8..60baf5439626b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -412,7 +412,7 @@ declare <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -975,7 +975,7 @@ declare <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1036,7 +1036,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index 37f42db58ef3cb..705024934da794 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -410,7 +410,7 @@ declare <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -973,7 +973,7 @@ declare <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1034,7 +1034,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
index b82fe5a19ea7e9..fa55989ccaba84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
index 3240190bd2b381..ad84f6b9e69825 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
index 21e935a0cb4314..90a3ebb8805ad1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
index 3937cd7b67025a..5568692c8d230b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
index 26c5eedc6aafd0..0d8868d67e40cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
index 827c7b436af8fa..f15dd74ab6d54a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
@@ -658,7 +658,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -706,7 +706,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -754,7 +754,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -802,7 +802,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -850,7 +850,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -898,7 +898,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -946,7 +946,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -994,7 +994,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -1042,7 +1042,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -1090,7 +1090,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -1138,7 +1138,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -1186,7 +1186,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 0ac38775fbe0b0..d57749bbe59b6e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -412,7 +412,7 @@ declare <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -975,7 +975,7 @@ declare <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1036,7 +1036,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 06cbc88dc200b4..37bdea7414f283 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -410,7 +410,7 @@ declare <vscale x 128 x i8> @llvm.vp.umin.nxv128i8(<vscale x 128 x i8>, <vscale
define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -973,7 +973,7 @@ declare <vscale x 32 x i32> @llvm.vp.umin.nxv32i32(<vscale x 32 x i32>, <vscale
define <vscale x 32 x i32> @vminu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: srli a3, a2, 2
@@ -1034,7 +1034,7 @@ declare i32 @llvm.vscale.i32()
define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a3, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
index 92d1668967e5ce..2d664fdc6e62ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -74,7 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -117,7 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -203,7 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -246,7 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -289,7 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
index 4ae487fcf36c53..af26bfd991f50e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
index fa6273b6fd012b..dafe80f492d938 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -971,7 +971,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1020,7 +1020,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1069,7 +1069,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1090,7 +1090,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8_1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: li a1, 99
; CHECK-NEXT: vmv1r.v v0, v9
@@ -1158,7 +1158,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1207,7 +1207,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1256,7 +1256,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1305,7 +1305,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1403,7 +1403,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1452,7 +1452,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1501,7 +1501,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1550,7 +1550,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1599,7 +1599,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1648,7 +1648,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1697,7 +1697,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1773,7 +1773,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1849,7 +1849,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1925,7 +1925,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1961,7 +1961,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1997,7 +1997,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -2064,7 +2064,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2100,7 +2100,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2136,7 +2136,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2172,7 +2172,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2208,7 +2208,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2244,7 +2244,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2280,7 +2280,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2316,7 +2316,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2352,7 +2352,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2388,7 +2388,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2424,7 +2424,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2460,7 +2460,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2496,7 +2496,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2532,7 +2532,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2568,7 +2568,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2604,7 +2604,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index 6cfd0ed7318e7f..b943cc9f9f1164 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -971,7 +971,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1020,7 +1020,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1069,7 +1069,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1118,7 +1118,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1167,7 +1167,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1216,7 +1216,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1265,7 +1265,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1314,7 +1314,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1363,7 +1363,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1412,7 +1412,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1461,7 +1461,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1510,7 +1510,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1559,7 +1559,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1608,7 +1608,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1657,7 +1657,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1733,7 +1733,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1809,7 +1809,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1885,7 +1885,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1921,7 +1921,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1957,7 +1957,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1993,7 +1993,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2014,7 +2014,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: li a1, 99
; CHECK-NEXT: vmv1r.v v0, v9
@@ -2051,7 +2051,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2087,7 +2087,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2123,7 +2123,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2159,7 +2159,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2258,7 +2258,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2294,7 +2294,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2330,7 +2330,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2366,7 +2366,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2402,7 +2402,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2438,7 +2438,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2474,7 +2474,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2510,7 +2510,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2546,7 +2546,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2582,7 +2582,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
index 668de9b965d6dd..514b1290975f37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
index bece4c9c53f783..3f6f79ec1c5910 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
index ad05e4ec993b2f..4e092a57dc19a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -74,7 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -117,7 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -203,7 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -246,7 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -289,7 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
index ca78acc94560d7..e81e3b341a179e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
index 5d17abb1044e1f..4cc1ee88c8f652 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
index 6bb8710d42f7b5..a744d951b9f7fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
index ae981d700a84a4..cf9e8467fa3979 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
index 54fee4a68aca2c..61357a62af5613 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
@@ -970,7 +970,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1018,7 +1018,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1066,7 +1066,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1114,7 +1114,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1162,7 +1162,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1210,7 +1210,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1258,7 +1258,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1306,7 +1306,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1354,7 +1354,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1402,7 +1402,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1450,7 +1450,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1498,7 +1498,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1546,7 +1546,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1594,7 +1594,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1642,7 +1642,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1717,7 +1717,7 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
@@ -1792,7 +1792,7 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v11, v0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
@@ -1867,7 +1867,7 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
;
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v13, v0
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
@@ -1903,7 +1903,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -1939,7 +1939,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -1975,7 +1975,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2011,7 +2011,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2047,7 +2047,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2083,7 +2083,7 @@ entry:
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2119,7 +2119,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2155,7 +2155,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2191,7 +2191,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2227,7 +2227,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2263,7 +2263,7 @@ entry:
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2299,7 +2299,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2335,7 +2335,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2371,7 +2371,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2407,7 +2407,7 @@ entry:
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2443,7 +2443,7 @@ entry:
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2479,7 +2479,7 @@ entry:
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2515,7 +2515,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
index e4b9c39fb85083..2dc97931e989ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
@@ -31,7 +31,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -74,7 +74,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -117,7 +117,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -160,7 +160,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -203,7 +203,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -246,7 +246,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -289,7 +289,7 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 588dd80923f8ba..82689b36c71401 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -49,7 +49,7 @@ define <vscale x 4 x i32> @vadd_same_passthru(<vscale x 4 x i32> %passthru, <vsc
define <vscale x 4 x i32> @unfoldable_diff_avl_unknown(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: unfoldable_diff_avl_unknown:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v14, v10, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
index 219269dc109b46..e3e3a77d436de5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
@@ -5,7 +5,7 @@
define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v0
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -18,7 +18,7 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
;
; RV64-LABEL: bool_vec:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
@@ -37,7 +37,7 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec_zero_poison:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v9, v0
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -46,7 +46,7 @@ define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m,
;
; RV64-LABEL: bool_vec_zero_poison:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
index 421afd746cfc07..b9bc2c1a456046 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-select.ll
@@ -12,7 +12,7 @@ define <vscale x 1 x i64> @all_ones(<vscale x 1 x i64> %true, <vscale x 1 x i64>
define <vscale x 1 x i64> @all_zeroes(<vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl) {
; CHECK-LABEL: all_zeroes:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> splat (i1 false), <vscale x 1 x i64> %true, <vscale x 1 x i64> %false, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index 50170a009e5640..476cc99258ec38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -10,7 +10,7 @@ declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 x i1>, i32,
define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -35,7 +35,7 @@ define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %ev
define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -60,7 +60,7 @@ define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb,
define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -86,7 +86,7 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1>
define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -111,7 +111,7 @@ define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %ev
define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -136,7 +136,7 @@ define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb,
define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v4i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -162,7 +162,7 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1>
define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -187,7 +187,7 @@ define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %ev
define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -212,7 +212,7 @@ define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb,
define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v8i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -238,7 +238,7 @@ define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1>
define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -263,7 +263,7 @@ define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext
define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -288,7 +288,7 @@ define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1>
define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v16i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 178de322d7e036..0a9101053b5a0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -13,7 +13,7 @@ declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i
define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -38,7 +38,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -63,7 +63,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -89,7 +89,7 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -114,7 +114,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -139,7 +139,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -165,7 +165,7 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -190,7 +190,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -215,7 +215,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -241,7 +241,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -266,7 +266,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -291,7 +291,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -317,7 +317,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -342,7 +342,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -367,7 +367,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -394,7 +394,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -419,7 +419,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -444,7 +444,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -471,7 +471,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -496,7 +496,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -521,7 +521,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index 2452b1e8d5f8a5..950afc0eb4b9d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.vp.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr>,
define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8> %idxs, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv32i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a2, a3, 1
@@ -286,7 +286,7 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
;
; RV64-LABEL: vpgather_baseidx_nxv32i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: slli a3, a2, 1
@@ -2459,7 +2459,7 @@ declare <vscale x 16 x double> @llvm.vp.gather.nxv16f64.nxv16p0(<vscale x 16 x p
define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: sub a2, a0, a1
@@ -2483,7 +2483,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
;
; RV64-LABEL: vpgather_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: sub a2, a0, a1
@@ -2510,7 +2510,7 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v16, v8
@@ -2536,7 +2536,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
;
; RV64-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
@@ -2570,7 +2570,7 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v16, v8
@@ -2596,7 +2596,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
;
; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
@@ -2631,7 +2631,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV32-NEXT: vmv1r.v v12, v0
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vzext.vf2 v16, v8
@@ -2657,7 +2657,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
;
; RV64-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index d26b6dc9210250..3eb0a9664182bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -522,7 +522,7 @@ declare <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr, <vscale x 16 x i1>
define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: sub a3, a1, a2
@@ -562,7 +562,7 @@ declare <vscale x 16 x double> @llvm.vector.extract.nxv16f64(<vscale x 17 x doub
define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv17f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a5, a3, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index 8c4d2e077f31cb..e34bba4b36b940 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -361,7 +361,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: addi a1, sp, 16
@@ -402,7 +402,7 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
@@ -431,7 +431,7 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index bbd0f8cbd8b5a8..dce6b4f9b46e1a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -468,7 +468,7 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: slli a3, a3, 3
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
@@ -484,7 +484,7 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a6, a3
; CHECK-NEXT: .LBB36_4:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 12db6facd35b66..f9bf569ee33716 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -23,7 +23,7 @@ declare i1 @llvm.vp.reduce.or.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i
define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -40,7 +40,7 @@ declare i1 @llvm.vp.reduce.xor.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -73,7 +73,7 @@ declare i1 @llvm.vp.reduce.or.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i
define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -90,7 +90,7 @@ declare i1 @llvm.vp.reduce.xor.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -123,7 +123,7 @@ declare i1 @llvm.vp.reduce.or.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i
define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -140,7 +140,7 @@ declare i1 @llvm.vp.reduce.xor.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -173,7 +173,7 @@ declare i1 @llvm.vp.reduce.or.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i
define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -190,7 +190,7 @@ declare i1 @llvm.vp.reduce.xor.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -223,7 +223,7 @@ declare i1 @llvm.vp.reduce.or.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>
define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -240,7 +240,7 @@ declare i1 @llvm.vp.reduce.xor.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1
define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -273,7 +273,7 @@ declare i1 @llvm.vp.reduce.or.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>
define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -290,7 +290,7 @@ declare i1 @llvm.vp.reduce.xor.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1
define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -307,7 +307,7 @@ declare i1 @llvm.vp.reduce.or.nxv40i1(i1, <vscale x 40 x i1>, <vscale x 40 x i1>
define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, <vscale x 40 x i1> %v, <vscale x 40 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv40i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -340,7 +340,7 @@ declare i1 @llvm.vp.reduce.or.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>
define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -357,7 +357,7 @@ declare i1 @llvm.vp.reduce.xor.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1
define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -374,7 +374,7 @@ declare i1 @llvm.vp.reduce.or.nxv128i1(i1, <vscale x 128 x i1>, <vscale x 128 x
define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, <vscale x 128 x i1> %v, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_nxv128i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
@@ -406,7 +406,7 @@ declare i1 @llvm.vp.reduce.add.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -423,7 +423,7 @@ declare i1 @llvm.vp.reduce.add.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -440,7 +440,7 @@ declare i1 @llvm.vp.reduce.add.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -457,7 +457,7 @@ declare i1 @llvm.vp.reduce.add.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -474,7 +474,7 @@ declare i1 @llvm.vp.reduce.add.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1
define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -491,7 +491,7 @@ declare i1 @llvm.vp.reduce.add.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1
define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -508,7 +508,7 @@ declare i1 @llvm.vp.reduce.add.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1
define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -638,7 +638,7 @@ declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -655,7 +655,7 @@ declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -672,7 +672,7 @@ declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -689,7 +689,7 @@ declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -706,7 +706,7 @@ declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i
define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -723,7 +723,7 @@ declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i
define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -740,7 +740,7 @@ declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i
define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -757,7 +757,7 @@ declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>,
define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -774,7 +774,7 @@ declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>,
define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -791,7 +791,7 @@ declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>,
define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -808,7 +808,7 @@ declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>,
define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -825,7 +825,7 @@ declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i
define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -842,7 +842,7 @@ declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i
define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -859,7 +859,7 @@ declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i
define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
index 04e580127117ce..56b62e090dd81b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -22,7 +22,7 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
; NOSUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
; NOSUBREG-NEXT: # =>This Inner Loop Header: Depth=1
; NOSUBREG-NEXT: vl1r.v v9, (zero)
-; NOSUBREG-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; NOSUBREG-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; NOSUBREG-NEXT: vmv1r.v v13, v12
; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; NOSUBREG-NEXT: vrgatherei16.vv v13, v9, v10
@@ -43,7 +43,7 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
; SUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
; SUBREG-NEXT: # =>This Inner Loop Header: Depth=1
; SUBREG-NEXT: vl1r.v v9, (zero)
-; SUBREG-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; SUBREG-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; SUBREG-NEXT: vmv1r.v v13, v12
; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; SUBREG-NEXT: vrgatherei16.vv v13, v9, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index 56d40fbba6807a..2c89fb75f57676 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -572,7 +572,7 @@ declare <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1351,7 +1351,7 @@ declare <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index 63277f5be7aa8d..fee38b15fea78b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -571,7 +571,7 @@ declare <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1350,7 +1350,7 @@ declare <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
index bbde5b015d4b7b..7c9c8b5c17ae6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-bf16.ll
@@ -126,7 +126,7 @@ define <vscale x 8 x bfloat> @vmerge_truelhs_nxv8bf16_0(<vscale x 8 x bfloat> %v
define <vscale x 8 x bfloat> @vmerge_falselhs_nxv8bf16_0(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8bf16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 521e4be18f1dda..39678382492f4d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -175,7 +175,7 @@ define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <v
define <vscale x 8 x half> @vmerge_falselhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
index 51b735717017e3..421ed62053a7f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
@@ -803,7 +803,7 @@ define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vmerge_falselhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 38c4755e48b862..4fa0e670f8456b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -362,7 +362,7 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: slli a4, a3, 3
@@ -422,7 +422,7 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a3, a1, 3
@@ -712,7 +712,7 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a3, a1, 3
@@ -838,7 +838,7 @@ define <vscale x 2 x i1> @select_cond_x_cond(<vscale x 2 x i1> %x, <vscale x 2 x
define <vscale x 2 x i1> @select_undef_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_undef_T_F:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> poison, <vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 %evl)
@@ -856,7 +856,7 @@ define <vscale x 2 x i1> @select_undef_undef_F(<vscale x 2 x i1> %x, i32 zeroext
define <vscale x 2 x i1> @select_unknown_undef_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_undef_F:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> undef, <vscale x 2 x i1> %y, i32 %evl)
@@ -866,7 +866,7 @@ define <vscale x 2 x i1> @select_unknown_undef_F(<vscale x 2 x i1> %x, <vscale x
define <vscale x 2 x i1> @select_unknown_T_undef(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_T_undef:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> poison, i32 %evl)
@@ -876,7 +876,7 @@ define <vscale x 2 x i1> @select_unknown_T_undef(<vscale x 2 x i1> %x, <vscale x
define <vscale x 2 x i1> @select_false_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> %z, i32 zeroext %evl) {
; CHECK-LABEL: select_false_T_F:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> %y, <vscale x 2 x i1> %z, i32 %evl)
@@ -886,7 +886,7 @@ define <vscale x 2 x i1> @select_false_T_F(<vscale x 2 x i1> %x, <vscale x 2 x i
define <vscale x 2 x i1> @select_unknown_T_T(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, i32 zeroext %evl) {
; CHECK-LABEL: select_unknown_T_T:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> %x, <vscale x 2 x i1> %y, <vscale x 2 x i1> %y, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
index c7446bbf2cbcde..b7144cbca81d59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll
@@ -18,7 +18,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind {
; CHECK-LABEL: fixed_length:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: # kill: def $v11 killed $v10
; CHECK-NEXT: # kill: def $v9 killed $v8
@@ -37,7 +37,7 @@ entry:
define <vscale x 1 x double> @scalable(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: # implicit-def: $v9
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
@@ -55,7 +55,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_vlmax(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_vlmax:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -84,7 +84,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_avl_imm(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_imm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetivli a0, 2, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -112,7 +112,7 @@ entry:
define <vscale x 1 x double> @intrinsic_same_avl_reg(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_reg:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
@@ -140,7 +140,7 @@ entry:
define <vscale x 1 x double> @intrinsic_diff_avl_reg(i64 %avl, i64 %avl2, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_diff_avl_reg:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT: # implicit-def: $v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index f25da055467925..7328fd407fb1e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -377,7 +377,7 @@ entry:
define <vscale x 1 x double> @test19(<vscale x 1 x double> %a, double %b) nounwind {
; CHECK-LABEL: test19:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
index 378cdcfca07260..06bcfcc24850be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -151,7 +151,7 @@ declare <vscale x 32 x i32> @llvm.vp.sext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <
define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vsext_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 776cd977661d56..6891f9e9c99ff3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -508,7 +508,7 @@ declare <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
@@ -533,7 +533,7 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
@@ -568,7 +568,7 @@ declare <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i
define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index 5c22b46621a77f..93ab9cdb03631e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -590,7 +590,7 @@ declare <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1393,7 +1393,7 @@ declare <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 49f72975c4c495..f1fb838eec97c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -588,7 +588,7 @@ declare <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8>, <vsc
define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
@@ -1391,7 +1391,7 @@ declare <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32>, <vsc
define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index cc0390a9238eb9..d680c0762e4010 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -157,7 +157,7 @@ declare <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64>
define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
@@ -215,7 +215,7 @@ declare <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32>,
define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i7_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -250,7 +250,7 @@ declare <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32>,
define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
@@ -291,7 +291,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index e061f074db5bd5..cd521dee043dd2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -500,7 +500,7 @@ declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFH-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFH-NEXT: vmv1r.v v24, v0
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
@@ -525,7 +525,7 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; ZVFHMIN-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: srli a2, a1, 2
@@ -560,7 +560,7 @@ declare <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i
define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_nxv32f32_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
index cddc16a058eeaa..907f5c83b8b19b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -151,7 +151,7 @@ declare <vscale x 32 x i32> @llvm.vp.zext.nxv32i32.nxv32i8(<vscale x 32 x i8>, <
define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vzext_nxv32i8_nxv32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2