[llvm] ecb855a - [RISCV] Reduce LMUL for vector extracts
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 22 07:36:28 PDT 2023
Author: Philip Reames
Date: 2023-08-22T07:36:17-07:00
New Revision: ecb855a5a8c5dd9d46ca85041d7fe987fa73ba7c
URL: https://github.com/llvm/llvm-project/commit/ecb855a5a8c5dd9d46ca85041d7fe987fa73ba7c
DIFF: https://github.com/llvm/llvm-project/commit/ecb855a5a8c5dd9d46ca85041d7fe987fa73ba7c.diff
LOG: [RISCV] Reduce LMUL for vector extracts
If we have a known (or bounded) index which definitely fits within a smaller LMUL register group, we can reduce the LMUL of the slide and extract instructions. This loosens constraints on register allocation and allows the hardware to do less work, at the potential cost of some additional VTYPE toggles. In practice, after the prior patches, we appear to do a decent job of eliminating the additional VTYPE toggles in most cases.
Differential Revision: https://reviews.llvm.org/D158460
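For readers skimming the diff below: the narrowing decision reduces to comparing the worst-case index against multiples of the minimum VLMAX guaranteed by the subtarget. The following standalone C++ sketch is illustrative only and is not part of the patch -- the helper name and the plain-integer 1/2/4/8 LMUL encoding are assumptions made for brevity -- but it performs the same comparison and includes a worked instance matching the e64 test changes.

// Standalone illustration of the LMUL-narrowing computation (assumption:
// LMUL is modelled as a plain integer 1/2/4/8 rather than as an MVT).
#include <cstdint>
#include <cstdio>

// Return the smallest LMUL (1, 2 or 4) whose register group is guaranteed to
// contain element MaxIdx, or 0 if no narrowing below CurLMUL is possible.
static unsigned smallestLMULContaining(uint64_t MaxIdx, unsigned MinVLen,
                                       unsigned EltSizeBits, unsigned CurLMUL) {
  const unsigned MinVLMAX = MinVLen / EltSizeBits; // elements per LMUL=1 group
  unsigned NewLMUL = 0;
  if (MaxIdx < MinVLMAX)
    NewLMUL = 1;
  else if (MaxIdx < MinVLMAX * 2)
    NewLMUL = 2;
  else if (MaxIdx < MinVLMAX * 4)
    NewLMUL = 4;
  return (NewLMUL != 0 && NewLMUL < CurLMUL) ? NewLMUL : 0;
}

int main() {
  // With Zvl128b (VLEN >= 128) and e64, an LMUL=1 register holds at least
  // 128/64 = 2 elements, so a constant index of 2 only needs an m2 group;
  // this is why extractelt_nxv8f64_imm below drops from m8 to m2, while the
  // index-0 cases drop all the way to m1.
  printf("%u\n", smallestLMULContaining(2, 128, 64, 8)); // prints 2
  printf("%u\n", smallestLMULContaining(0, 128, 64, 8)); // prints 1
  return 0;
}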
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2c3ab3d215e7d3..536cbcda6da28d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7198,6 +7198,34 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
}
+  // Reduce the LMUL of our slidedown and vmv.x.s to the smallest LMUL which
+  // contains our index.
+  std::optional<uint64_t> MaxIdx;
+  if (VecVT.isFixedLengthVector())
+    MaxIdx = VecVT.getVectorNumElements() - 1;
+  if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx))
+    MaxIdx = IdxC->getZExtValue();
+  if (MaxIdx) {
+    const unsigned EltSize = ContainerVT.getScalarSizeInBits();
+    const unsigned VectorBitsMin = Subtarget.getRealMinVLen();
+    const unsigned MinVLMAX = VectorBitsMin/EltSize;
+    MVT SmallerVT;
+    if (*MaxIdx < MinVLMAX)
+      SmallerVT = getLMUL1VT(ContainerVT);
+    else if (*MaxIdx < MinVLMAX * 2)
+      SmallerVT = getLMUL1VT(ContainerVT)
+                      .getDoubleNumVectorElementsVT();
+    else if (*MaxIdx < MinVLMAX * 4)
+      SmallerVT = getLMUL1VT(ContainerVT)
+                      .getDoubleNumVectorElementsVT()
+                      .getDoubleNumVectorElementsVT();
+    if (SmallerVT.isValid() && ContainerVT.bitsGT(SmallerVT)) {
+      ContainerVT = SmallerVT;
+      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerVT, Vec,
+                        DAG.getConstant(0, DL, XLenVT));
+    }
+  }
+
// If the index is 0, the vector is already in the right position.
if (!isNullConstant(Idx)) {
// Use a VL of 1 to avoid processing more elements than we need.
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index b0c46a9a007e3b..36dfd631b7664f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -103,7 +103,7 @@ define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 zeroext %idx) {
define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
; CHECK-LABEL: extractelt_nxv8f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x half> %v, i32 0
@@ -113,7 +113,7 @@ define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
; CHECK-LABEL: extractelt_nxv8f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -135,7 +135,7 @@ define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 zeroext %idx) {
define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
; CHECK-LABEL: extractelt_nxv16f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x half> %v, i32 0
@@ -145,7 +145,7 @@ define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
; CHECK-LABEL: extractelt_nxv16f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -167,7 +167,7 @@ define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 zeroext %idx)
define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
; CHECK-LABEL: extractelt_nxv32f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x half> %v, i32 0
@@ -177,7 +177,7 @@ define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
; CHECK-LABEL: extractelt_nxv32f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -263,7 +263,7 @@ define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 zeroext %idx)
define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x float> %v, i32 0
@@ -273,7 +273,7 @@ define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -295,7 +295,7 @@ define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 zeroext %idx)
define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x float> %v, i32 0
@@ -305,7 +305,7 @@ define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -327,7 +327,7 @@ define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 zeroext %idx)
define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x float> %v, i32 0
@@ -337,7 +337,7 @@ define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -391,7 +391,7 @@ define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 zeroext %idx
define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
; CHECK-LABEL: extractelt_nxv2f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x double> %v, i32 0
@@ -423,7 +423,7 @@ define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 zeroext %idx
define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x double> %v, i32 0
@@ -433,7 +433,7 @@ define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -455,7 +455,7 @@ define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 zeroext %idx
define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x double> %v, i32 0
@@ -465,7 +465,7 @@ define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -488,7 +488,7 @@ define void @store_extractelt_nxv8f64(<vscale x 8 x double>* %x, double* %p) {
; CHECK-LABEL: store_extractelt_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vse64.v v8, (a1)
; CHECK-NEXT: ret
@@ -502,7 +502,7 @@ define void @store_vfmv_f_s_nxv8f64(<vscale x 8 x double>* %x, double* %p) {
; CHECK-LABEL: store_vfmv_f_s_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vse64.v v8, (a1)
; CHECK-NEXT: ret
%a = load <vscale x 8 x double>, <vscale x 8 x double>* %x
@@ -516,7 +516,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)
define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fadd_nxv4f32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa5, v8
; CHECK-NEXT: lui a0, 263168
@@ -533,7 +533,7 @@ define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fsub_nxv4f32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vfmv.f.s fa5, v8
; CHECK-NEXT: lui a0, 263168
@@ -550,7 +550,7 @@ define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fmul_nxv4f32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vfmv.f.s fa5, v8
; CHECK-NEXT: lui a0, 263168
@@ -567,7 +567,7 @@ define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fdiv_nxv4f32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa5, v8
; CHECK-NEXT: lui a0, 263168
; CHECK-NEXT: fmv.w.x fa4, a0
@@ -583,7 +583,7 @@ define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
define double @extractelt_nxv16f64_0(<vscale x 16 x double> %v) {
; CHECK-LABEL: extractelt_nxv16f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x double> %v, i32 0
@@ -663,7 +663,7 @@ define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
define double @extractelt_nxv16f64_imm(<vscale x 16 x double> %v) {
; CHECK-LABEL: extractelt_nxv16f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index cafe2957c7687e..fd2f89e26e5980 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -136,7 +136,7 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 0
@@ -146,7 +146,7 @@ define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -168,7 +168,7 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 0
@@ -178,7 +178,7 @@ define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -200,7 +200,7 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 0
@@ -210,7 +210,7 @@ define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -328,7 +328,7 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 0
@@ -338,7 +338,7 @@ define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -360,7 +360,7 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 0
@@ -370,7 +370,7 @@ define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -392,7 +392,7 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 0
@@ -402,7 +402,7 @@ define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -488,7 +488,7 @@ define i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %idx) {
define i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i32> %v, i32 0
@@ -498,7 +498,7 @@ define i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -520,7 +520,7 @@ define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
define i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i32> %v, i32 0
@@ -530,7 +530,7 @@ define i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -552,7 +552,7 @@ define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
define i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i32> %v, i32 0
@@ -562,7 +562,7 @@ define i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -748,7 +748,7 @@ define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
; CHECK-LABEL: extractelt_add_nxv4i32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: addi a0, a0, 3
@@ -763,7 +763,7 @@ define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
define i32 @extractelt_sub_nxv4i32_splat(<vscale x 4 x i32> %x) {
; CHECK-LABEL: extractelt_sub_nxv4i32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: li a1, 3
@@ -782,14 +782,14 @@ define i32 @extractelt_mul_nxv4i32_splat(<vscale x 4 x i32> %x) {
; RV32NOM-NEXT: li a0, 3
; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; RV32NOM-NEXT: vmul.vx v8, v8, a0
-; RV32NOM-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32NOM-NEXT: vslidedown.vi v8, v8, 3
; RV32NOM-NEXT: vmv.x.s a0, v8
; RV32NOM-NEXT: ret
;
; RV32M-LABEL: extractelt_mul_nxv4i32_splat:
; RV32M: # %bb.0:
-; RV32M-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32M-NEXT: vslidedown.vi v8, v8, 3
; RV32M-NEXT: vmv.x.s a0, v8
; RV32M-NEXT: slli a1, a0, 1
@@ -816,7 +816,7 @@ define i32 @extractelt_sdiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
;
; RV32M-LABEL: extractelt_sdiv_nxv4i32_splat:
; RV32M: # %bb.0:
-; RV32M-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32M-NEXT: vmv.x.s a0, v8
; RV32M-NEXT: lui a1, 349525
; RV32M-NEXT: addi a1, a1, 1366
@@ -845,7 +845,7 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
;
; RV32M-LABEL: extractelt_udiv_nxv4i32_splat:
; RV32M: # %bb.0:
-; RV32M-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32M-NEXT: vmv.x.s a0, v8
; RV32M-NEXT: lui a1, 349525
; RV32M-NEXT: addi a1, a1, 1366
@@ -863,7 +863,7 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
define i32 @extractelt_nxv32i32_0(<vscale x 32 x i32> %v) {
; CHECK-LABEL: extractelt_nxv32i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i32> %v, i32 0
@@ -906,7 +906,7 @@ define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
define i32 @extractelt_nxv32i32_imm(<vscale x 32 x i32> %v) {
; CHECK-LABEL: extractelt_nxv32i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -956,7 +956,7 @@ define i32 @extractelt_nxv32i32_idx(<vscale x 32 x i32> %v, i32 %idx) {
define i64 @extractelt_nxv16i64_0(<vscale x 16 x i64> %v) {
; CHECK-LABEL: extractelt_nxv16i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmv.x.s a1, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index 6fd250cf5965cb..f5e5b9e9083b8d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -135,7 +135,7 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 zeroext %idx)
define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 0
@@ -145,7 +145,7 @@ define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -167,7 +167,7 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 zeroext %id
define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 0
@@ -177,7 +177,7 @@ define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -199,7 +199,7 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 zeroext %id
define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 0
@@ -209,7 +209,7 @@ define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -327,7 +327,7 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 zeroext %i
define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 0
@@ -337,7 +337,7 @@ define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -359,7 +359,7 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 zeroext %i
define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 0
@@ -369,7 +369,7 @@ define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -391,7 +391,7 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 zeroext
define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 0
@@ -401,7 +401,7 @@ define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -487,7 +487,7 @@ define signext i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 zeroext %i
define signext i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i32> %v, i32 0
@@ -497,7 +497,7 @@ define signext i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
define signext i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -519,7 +519,7 @@ define signext i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 zeroext %i
define signext i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i32> %v, i32 0
@@ -529,7 +529,7 @@ define signext i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
define signext i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -551,7 +551,7 @@ define signext i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 zeroext %i
define signext i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i32> %v, i32 0
@@ -561,7 +561,7 @@ define signext i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
define signext i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -615,7 +615,7 @@ define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 zeroext %idx) {
define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
; CHECK-LABEL: extractelt_nxv2i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i64> %v, i32 0
@@ -647,7 +647,7 @@ define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 zeroext %idx) {
define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i64> %v, i32 0
@@ -657,7 +657,7 @@ define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -679,7 +679,7 @@ define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 zeroext %idx) {
define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i64> %v, i32 0
@@ -689,7 +689,7 @@ define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -711,7 +711,7 @@ define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 zeroext %idx) {
define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
; CHECK-LABEL: extractelt_add_nxv4i32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: addiw a0, a0, 3
@@ -726,7 +726,7 @@ define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
define i32 @extractelt_sub_nxv4i32_splat(<vscale x 4 x i32> %x) {
; CHECK-LABEL: extractelt_sub_nxv4i32_splat:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: li a1, 3
@@ -745,14 +745,14 @@ define i32 @extractelt_mul_nxv4i32_splat(<vscale x 4 x i32> %x) {
; RV64NOM-NEXT: li a0, 3
; RV64NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; RV64NOM-NEXT: vmul.vx v8, v8, a0
-; RV64NOM-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64NOM-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64NOM-NEXT: vslidedown.vi v8, v8, 3
; RV64NOM-NEXT: vmv.x.s a0, v8
; RV64NOM-NEXT: ret
;
; RV64M-LABEL: extractelt_mul_nxv4i32_splat:
; RV64M: # %bb.0:
-; RV64M-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64M-NEXT: vslidedown.vi v8, v8, 3
; RV64M-NEXT: vmv.x.s a0, v8
; RV64M-NEXT: slli a1, a0, 1
@@ -779,7 +779,7 @@ define i32 @extractelt_sdiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
;
; RV64M-LABEL: extractelt_sdiv_nxv4i32_splat:
; RV64M: # %bb.0:
-; RV64M-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64M-NEXT: vmv.x.s a0, v8
; RV64M-NEXT: lui a1, 349525
; RV64M-NEXT: addiw a1, a1, 1366
@@ -809,7 +809,7 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
;
; RV64M-LABEL: extractelt_udiv_nxv4i32_splat:
; RV64M: # %bb.0:
-; RV64M-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64M-NEXT: vmv.x.s a0, v8
; RV64M-NEXT: lui a1, 349525
; RV64M-NEXT: addiw a1, a1, 1366
@@ -828,7 +828,7 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
define i64 @extractelt_nxv16i64_0(<vscale x 16 x i64> %v) {
; CHECK-LABEL: extractelt_nxv16i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i64> %v, i32 0
@@ -879,7 +879,7 @@ define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
define i64 @extractelt_nxv16i64_imm(<vscale x 16 x i64> %v) {
; CHECK-LABEL: extractelt_nxv16i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index dd9f5874fd66d8..cd806ed8e12572 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -110,7 +110,7 @@ define i8 @extractelt_v32i8(ptr %x) nounwind {
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 7
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -124,7 +124,7 @@ define i16 @extractelt_v16i16(ptr %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 7
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -178,7 +178,7 @@ define half @extractelt_v16f16(ptr %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 7
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -192,7 +192,7 @@ define float @extractelt_v8f32(ptr %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
index 957d2a4b9205ac..3b5ce8e82ad1fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
@@ -312,7 +312,7 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 1
; RV32-NEXT: vfmv.f.s fa3, v12
; RV32-NEXT: lui a0, %hi(.LCPI12_0)
@@ -335,9 +335,9 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a2
; RV32-NEXT: vslide1down.vx v12, v12, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v16, v8, 2
-; RV32-NEXT: vfmv.f.s fa3, v16
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v14, v8, 2
+; RV32-NEXT: vfmv.f.s fa3, v14
; RV32-NEXT: feq.d a0, fa3, fa3
; RV32-NEXT: neg a0, a0
; RV32-NEXT: fmax.d fa3, fa3, fa5
@@ -346,9 +346,9 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: and a0, a0, a2
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vslide1down.vx v12, v12, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v16, v8, 3
-; RV32-NEXT: vfmv.f.s fa3, v16
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v14, v8, 3
+; RV32-NEXT: vfmv.f.s fa3, v14
; RV32-NEXT: feq.d a0, fa3, fa3
; RV32-NEXT: neg a0, a0
; RV32-NEXT: fmax.d fa3, fa3, fa5
@@ -408,7 +408,7 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 1
; RV64-NEXT: vfmv.f.s fa3, v12
; RV64-NEXT: lui a0, %hi(.LCPI12_0)
@@ -431,9 +431,9 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vslide1down.vx v12, v8, a2
; RV64-NEXT: vslide1down.vx v12, v12, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v16, v8, 2
-; RV64-NEXT: vfmv.f.s fa3, v16
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v14, v8, 2
+; RV64-NEXT: vfmv.f.s fa3, v14
; RV64-NEXT: feq.d a0, fa3, fa3
; RV64-NEXT: neg a0, a0
; RV64-NEXT: fmax.d fa3, fa3, fa5
@@ -442,9 +442,9 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: and a0, a0, a2
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vslide1down.vx v12, v12, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v16, v8, 3
-; RV64-NEXT: vfmv.f.s fa3, v16
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v14, v8, 3
+; RV64-NEXT: vfmv.f.s fa3, v14
; RV64-NEXT: feq.d a0, fa3, fa3
; RV64-NEXT: neg a0, a0
; RV64-NEXT: fmax.d fa3, fa3, fa5
@@ -521,25 +521,25 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v16, v8, 1
-; RV32-NEXT: vfmv.f.s fa4, v16
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v13, v8, 1
+; RV32-NEXT: vfmv.f.s fa4, v13
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vslide1down.vx v12, v12, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v16, v8, 2
-; RV32-NEXT: vfmv.f.s fa4, v16
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v14, v8, 2
+; RV32-NEXT: vfmv.f.s fa4, v14
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vslide1down.vx v12, v12, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v16, v8, 3
-; RV32-NEXT: vfmv.f.s fa4, v16
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v14, v8, 3
+; RV32-NEXT: vfmv.f.s fa4, v14
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
@@ -593,25 +593,25 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT: vslide1down.vx v12, v8, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v16, v8, 1
-; RV64-NEXT: vfmv.f.s fa4, v16
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v13, v8, 1
+; RV64-NEXT: vfmv.f.s fa4, v13
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vslide1down.vx v12, v12, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v16, v8, 2
-; RV64-NEXT: vfmv.f.s fa4, v16
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v14, v8, 2
+; RV64-NEXT: vfmv.f.s fa4, v14
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vslide1down.vx v12, v12, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v16, v8, 3
-; RV64-NEXT: vfmv.f.s fa4, v16
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v14, v8, 3
+; RV64-NEXT: vfmv.f.s fa4, v14
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 543a5ce4506d7c..1fff6b0e82f1e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -49,13 +49,13 @@ define void @insertelt_v4i64_store(ptr %x, i64 %y) {
define <3 x i64> @insertelt_v3i64(<3 x i64> %a, i64 %y) {
; RV32-LABEL: insertelt_v3i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v10, v8, 3
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: vmv.x.s a3, v10
-; RV32-NEXT: vslidedown.vi v10, v8, 1
-; RV32-NEXT: vmv.x.s a4, v10
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 3
+; RV32-NEXT: vmv.x.s a2, v9
+; RV32-NEXT: vslidedown.vi v9, v8, 2
+; RV32-NEXT: vmv.x.s a3, v9
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vmv.x.s a4, v9
; RV32-NEXT: vmv.x.s a5, v8
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a5
@@ -69,9 +69,9 @@ define <3 x i64> @insertelt_v3i64(<3 x i64> %a, i64 %y) {
;
; RV64-LABEL: insertelt_v3i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vmv.x.s a1, v10
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vmv.x.s a2, v8
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v8, v8, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 9cde97dcd5b0ba..65ed472bb74e20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -4004,7 +4004,7 @@ define <8 x i64> @mgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i64> %passthr
; RV32ZVE32F-NEXT: andi a2, t0, 1
; RV32ZVE32F-NEXT: beqz a2, .LBB47_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a2, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4040,7 +4040,7 @@ define <8 x i64> @mgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i64> %passthr
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB47_2
; RV32ZVE32F-NEXT: .LBB47_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -4048,7 +4048,7 @@ define <8 x i64> @mgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i64> %passthr
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB47_3
; RV32ZVE32F-NEXT: .LBB47_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -4056,7 +4056,7 @@ define <8 x i64> @mgather_v8i64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i64> %passthr
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB47_4
; RV32ZVE32F-NEXT: .LBB47_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -4251,7 +4251,7 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB48_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4287,7 +4287,7 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB48_2
; RV32ZVE32F-NEXT: .LBB48_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -4295,7 +4295,7 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB48_3
; RV32ZVE32F-NEXT: .LBB48_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -4303,7 +4303,7 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB48_4
; RV32ZVE32F-NEXT: .LBB48_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -4525,7 +4525,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB49_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4561,7 +4561,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB49_2
; RV32ZVE32F-NEXT: .LBB49_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -4569,7 +4569,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB49_3
; RV32ZVE32F-NEXT: .LBB49_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -4577,7 +4577,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB49_4
; RV32ZVE32F-NEXT: .LBB49_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -4800,7 +4800,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB50_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4836,7 +4836,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB50_2
; RV32ZVE32F-NEXT: .LBB50_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -4844,7 +4844,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB50_3
; RV32ZVE32F-NEXT: .LBB50_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -4852,7 +4852,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB50_4
; RV32ZVE32F-NEXT: .LBB50_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -5083,7 +5083,7 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB51_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5119,7 +5119,7 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB51_2
; RV32ZVE32F-NEXT: .LBB51_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -5127,7 +5127,7 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB51_3
; RV32ZVE32F-NEXT: .LBB51_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -5135,7 +5135,7 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB51_4
; RV32ZVE32F-NEXT: .LBB51_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -5358,7 +5358,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB52_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5394,7 +5394,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB52_2
; RV32ZVE32F-NEXT: .LBB52_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -5402,7 +5402,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB52_3
; RV32ZVE32F-NEXT: .LBB52_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -5410,7 +5410,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB52_4
; RV32ZVE32F-NEXT: .LBB52_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -5634,7 +5634,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB53_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5670,7 +5670,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB53_2
; RV32ZVE32F-NEXT: .LBB53_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -5678,7 +5678,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB53_3
; RV32ZVE32F-NEXT: .LBB53_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -5686,7 +5686,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB53_4
; RV32ZVE32F-NEXT: .LBB53_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -5918,7 +5918,7 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB54_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5954,7 +5954,7 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB54_2
; RV32ZVE32F-NEXT: .LBB54_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -5962,7 +5962,7 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB54_3
; RV32ZVE32F-NEXT: .LBB54_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -5970,7 +5970,7 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB54_4
; RV32ZVE32F-NEXT: .LBB54_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -6191,7 +6191,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB55_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -6227,7 +6227,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB55_2
; RV32ZVE32F-NEXT: .LBB55_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -6235,7 +6235,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB55_3
; RV32ZVE32F-NEXT: .LBB55_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -6243,7 +6243,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB55_4
; RV32ZVE32F-NEXT: .LBB55_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -6465,7 +6465,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB56_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -6501,7 +6501,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB56_2
; RV32ZVE32F-NEXT: .LBB56_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -6509,7 +6509,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB56_3
; RV32ZVE32F-NEXT: .LBB56_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -6517,7 +6517,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB56_4
; RV32ZVE32F-NEXT: .LBB56_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -6763,7 +6763,7 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m,
; RV32ZVE32F-NEXT: andi a1, t0, 1
; RV32ZVE32F-NEXT: beqz a1, .LBB57_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: lw a1, 4(a2)
; RV32ZVE32F-NEXT: lw a2, 0(a2)
@@ -6799,7 +6799,7 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m,
; RV32ZVE32F-NEXT: andi a4, t0, 2
; RV32ZVE32F-NEXT: beqz a4, .LBB57_2
; RV32ZVE32F-NEXT: .LBB57_8: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a5, v10
; RV32ZVE32F-NEXT: lw a4, 4(a5)
@@ -6807,7 +6807,7 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m,
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB57_3
; RV32ZVE32F-NEXT: .LBB57_9: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a7, v10
; RV32ZVE32F-NEXT: lw a6, 4(a7)
@@ -6815,7 +6815,7 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m,
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB57_4
; RV32ZVE32F-NEXT: .LBB57_10: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s t2, v10
; RV32ZVE32F-NEXT: lw t1, 4(t2)
@@ -9771,27 +9771,27 @@ define <8 x double> @mgather_v8f64(<8 x ptr> %ptrs, <8 x i1> %m, <8 x double> %p
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB86_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB86_2
; RV32ZVE32F-NEXT: .LBB86_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB86_3
; RV32ZVE32F-NEXT: .LBB86_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB86_4
; RV32ZVE32F-NEXT: .LBB86_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -9967,27 +9967,27 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(ptr %base, <8 x i8> %idxs, <8 x
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB87_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB87_2
; RV32ZVE32F-NEXT: .LBB87_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB87_3
; RV32ZVE32F-NEXT: .LBB87_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB87_4
; RV32ZVE32F-NEXT: .LBB87_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -10182,27 +10182,27 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB88_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB88_2
; RV32ZVE32F-NEXT: .LBB88_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB88_3
; RV32ZVE32F-NEXT: .LBB88_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB88_4
; RV32ZVE32F-NEXT: .LBB88_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -10398,27 +10398,27 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB89_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB89_2
; RV32ZVE32F-NEXT: .LBB89_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB89_3
; RV32ZVE32F-NEXT: .LBB89_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB89_4
; RV32ZVE32F-NEXT: .LBB89_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -10622,27 +10622,27 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB90_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB90_2
; RV32ZVE32F-NEXT: .LBB90_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB90_3
; RV32ZVE32F-NEXT: .LBB90_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB90_4
; RV32ZVE32F-NEXT: .LBB90_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -10838,27 +10838,27 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB91_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB91_2
; RV32ZVE32F-NEXT: .LBB91_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB91_3
; RV32ZVE32F-NEXT: .LBB91_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB91_4
; RV32ZVE32F-NEXT: .LBB91_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -11055,27 +11055,27 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB92_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB92_2
; RV32ZVE32F-NEXT: .LBB92_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB92_3
; RV32ZVE32F-NEXT: .LBB92_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB92_4
; RV32ZVE32F-NEXT: .LBB92_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -11280,27 +11280,27 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(ptr %base, <8 x i32> %idxs, <8
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB93_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB93_2
; RV32ZVE32F-NEXT: .LBB93_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB93_3
; RV32ZVE32F-NEXT: .LBB93_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB93_4
; RV32ZVE32F-NEXT: .LBB93_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -11494,27 +11494,27 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB94_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB94_2
; RV32ZVE32F-NEXT: .LBB94_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB94_3
; RV32ZVE32F-NEXT: .LBB94_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB94_4
; RV32ZVE32F-NEXT: .LBB94_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -11709,27 +11709,27 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB95_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB95_2
; RV32ZVE32F-NEXT: .LBB95_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB95_3
; RV32ZVE32F-NEXT: .LBB95_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB95_4
; RV32ZVE32F-NEXT: .LBB95_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
@@ -11948,27 +11948,27 @@ define <8 x double> @mgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB96_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: fld fa0, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 2
; RV32ZVE32F-NEXT: beqz a2, .LBB96_2
; RV32ZVE32F-NEXT: .LBB96_11: # %cond.load1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa1, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 4
; RV32ZVE32F-NEXT: beqz a2, .LBB96_3
; RV32ZVE32F-NEXT: .LBB96_12: # %cond.load4
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa2, 0(a2)
; RV32ZVE32F-NEXT: andi a2, a1, 8
; RV32ZVE32F-NEXT: beqz a2, .LBB96_4
; RV32ZVE32F-NEXT: .LBB96_13: # %cond.load7
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a2, v10
; RV32ZVE32F-NEXT: fld fa3, 0(a2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 61b23aad130ac9..67043045e7b52f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -1898,24 +1898,24 @@ define void @mscatter_v8i32(<8 x i32> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB28_9: # %cond.store
; RV64ZVE32F-NEXT: ld a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: andi a0, a3, 2
; RV64ZVE32F-NEXT: beqz a0, .LBB28_2
; RV64ZVE32F-NEXT: .LBB28_10: # %cond.store1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse32.v v10, (t0)
; RV64ZVE32F-NEXT: andi a0, a3, 4
; RV64ZVE32F-NEXT: beqz a0, .LBB28_3
; RV64ZVE32F-NEXT: .LBB28_11: # %cond.store3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV64ZVE32F-NEXT: vse32.v v10, (a7)
; RV64ZVE32F-NEXT: andi a0, a3, 8
; RV64ZVE32F-NEXT: beqz a0, .LBB28_4
; RV64ZVE32F-NEXT: .LBB28_12: # %cond.store5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV64ZVE32F-NEXT: vse32.v v10, (a6)
; RV64ZVE32F-NEXT: andi a0, a3, 16
@@ -1975,7 +1975,7 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB29_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -1986,9 +1986,9 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB29_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -2028,7 +2028,7 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -2039,9 +2039,9 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB29_7
; RV64ZVE32F-NEXT: .LBB29_14: # %cond.store7
@@ -2107,7 +2107,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB30_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -2118,9 +2118,9 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB30_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -2160,7 +2160,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -2171,9 +2171,9 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB30_7
; RV64ZVE32F-NEXT: .LBB30_14: # %cond.store7
@@ -2241,7 +2241,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB31_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -2253,9 +2253,9 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB31_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -2297,7 +2297,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -2309,9 +2309,9 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB31_7
; RV64ZVE32F-NEXT: .LBB31_14: # %cond.store7
@@ -2382,7 +2382,7 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB32_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -2393,9 +2393,9 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB32_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -2435,7 +2435,7 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -2446,9 +2446,9 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB32_7
; RV64ZVE32F-NEXT: .LBB32_14: # %cond.store7
@@ -2515,7 +2515,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB33_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -2526,9 +2526,9 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB33_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -2568,7 +2568,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -2579,9 +2579,9 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB33_7
; RV64ZVE32F-NEXT: .LBB33_14: # %cond.store7
@@ -2652,7 +2652,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a3)
; RV64ZVE32F-NEXT: .LBB34_2: # %else
; RV64ZVE32F-NEXT: andi a3, a2, 2
@@ -2664,9 +2664,9 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a3)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a3)
; RV64ZVE32F-NEXT: .LBB34_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -2708,7 +2708,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 8
@@ -2720,9 +2720,9 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a3)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 16
; RV64ZVE32F-NEXT: beqz a3, .LBB34_7
; RV64ZVE32F-NEXT: .LBB34_14: # %cond.store7
@@ -2788,7 +2788,7 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 1
; RV64ZVE32F-NEXT: beqz a2, .LBB35_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -2802,7 +2802,6 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB35_4: # %else2
@@ -2844,9 +2843,9 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 2
-; RV64ZVE32F-NEXT: vse32.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB35_6
; RV64ZVE32F-NEXT: .LBB35_13: # %cond.store5
@@ -2855,7 +2854,6 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -3319,14 +3317,14 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV32ZVE32F-NEXT: .LBB41_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a6, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB41_2
; RV32ZVE32F-NEXT: .LBB41_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -3334,7 +3332,7 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV32ZVE32F-NEXT: andi a0, a6, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB41_3
; RV32ZVE32F-NEXT: .LBB41_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -3342,7 +3340,7 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV32ZVE32F-NEXT: andi a0, a6, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB41_4
; RV32ZVE32F-NEXT: .LBB41_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -3553,14 +3551,14 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: .LBB42_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB42_2
; RV32ZVE32F-NEXT: .LBB42_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -3568,7 +3566,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB42_3
; RV32ZVE32F-NEXT: .LBB42_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -3576,7 +3574,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB42_4
; RV32ZVE32F-NEXT: .LBB42_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -3797,14 +3795,14 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: .LBB43_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB43_2
; RV32ZVE32F-NEXT: .LBB43_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -3812,7 +3810,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB43_3
; RV32ZVE32F-NEXT: .LBB43_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -3820,7 +3818,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB43_4
; RV32ZVE32F-NEXT: .LBB43_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -4042,14 +4040,14 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: .LBB44_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB44_2
; RV32ZVE32F-NEXT: .LBB44_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -4057,7 +4055,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB44_3
; RV32ZVE32F-NEXT: .LBB44_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -4065,7 +4063,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB44_4
; RV32ZVE32F-NEXT: .LBB44_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -4295,14 +4293,14 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: .LBB45_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB45_2
; RV32ZVE32F-NEXT: .LBB45_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -4310,7 +4308,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB45_3
; RV32ZVE32F-NEXT: .LBB45_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -4318,7 +4316,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB45_4
; RV32ZVE32F-NEXT: .LBB45_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -4540,14 +4538,14 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: .LBB46_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB46_2
; RV32ZVE32F-NEXT: .LBB46_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -4555,7 +4553,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB46_3
; RV32ZVE32F-NEXT: .LBB46_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -4563,7 +4561,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB46_4
; RV32ZVE32F-NEXT: .LBB46_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -4786,14 +4784,14 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: .LBB47_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB47_2
; RV32ZVE32F-NEXT: .LBB47_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -4801,7 +4799,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB47_3
; RV32ZVE32F-NEXT: .LBB47_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -4809,7 +4807,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB47_4
; RV32ZVE32F-NEXT: .LBB47_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -5040,14 +5038,14 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: .LBB48_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB48_2
; RV32ZVE32F-NEXT: .LBB48_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -5055,7 +5053,7 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB48_3
; RV32ZVE32F-NEXT: .LBB48_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -5063,7 +5061,7 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB48_4
; RV32ZVE32F-NEXT: .LBB48_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -5283,14 +5281,14 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: .LBB49_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB49_2
; RV32ZVE32F-NEXT: .LBB49_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -5298,7 +5296,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB49_3
; RV32ZVE32F-NEXT: .LBB49_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -5306,7 +5304,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB49_4
; RV32ZVE32F-NEXT: .LBB49_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -5527,14 +5525,14 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: .LBB50_10: # %cond.store
; RV32ZVE32F-NEXT: lw s1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw s1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB50_2
; RV32ZVE32F-NEXT: .LBB50_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s0, 4(a0)
@@ -5542,7 +5540,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB50_3
; RV32ZVE32F-NEXT: .LBB50_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t5, 0(a0)
@@ -5550,7 +5548,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB50_4
; RV32ZVE32F-NEXT: .LBB50_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t3, 0(a0)
@@ -5813,14 +5811,14 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: .LBB51_10: # %cond.store
; RV32ZVE32F-NEXT: lw a2, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
; RV32ZVE32F-NEXT: sw a2, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB51_2
; RV32ZVE32F-NEXT: .LBB51_11: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s1, 4(a0)
@@ -5828,7 +5826,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB51_3
; RV32ZVE32F-NEXT: .LBB51_12: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t6, 0(a0)
@@ -5836,7 +5834,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB51_4
; RV32ZVE32F-NEXT: .LBB51_13: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t4, 0(a0)
@@ -7101,24 +7099,24 @@ define void @mscatter_v8f32(<8 x float> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB67_9: # %cond.store
; RV64ZVE32F-NEXT: ld a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: andi a0, a3, 2
; RV64ZVE32F-NEXT: beqz a0, .LBB67_2
; RV64ZVE32F-NEXT: .LBB67_10: # %cond.store1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vse32.v v10, (t0)
; RV64ZVE32F-NEXT: andi a0, a3, 4
; RV64ZVE32F-NEXT: beqz a0, .LBB67_3
; RV64ZVE32F-NEXT: .LBB67_11: # %cond.store3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV64ZVE32F-NEXT: vse32.v v10, (a7)
; RV64ZVE32F-NEXT: andi a0, a3, 8
; RV64ZVE32F-NEXT: beqz a0, .LBB67_4
; RV64ZVE32F-NEXT: .LBB67_12: # %cond.store5
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV64ZVE32F-NEXT: vse32.v v10, (a6)
; RV64ZVE32F-NEXT: andi a0, a3, 16
@@ -7126,24 +7124,28 @@ define void @mscatter_v8f32(<8 x float> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV64ZVE32F-NEXT: .LBB67_13: # %cond.store7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a5)
; RV64ZVE32F-NEXT: andi a0, a3, 32
; RV64ZVE32F-NEXT: beqz a0, .LBB67_6
; RV64ZVE32F-NEXT: .LBB67_14: # %cond.store9
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a4)
; RV64ZVE32F-NEXT: andi a0, a3, 64
; RV64ZVE32F-NEXT: beqz a0, .LBB67_7
; RV64ZVE32F-NEXT: .LBB67_15: # %cond.store11
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a0, a3, -128
; RV64ZVE32F-NEXT: beqz a0, .LBB67_8
; RV64ZVE32F-NEXT: .LBB67_16: # %cond.store13
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a1)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> %val, <8 x ptr> %ptrs, i32 4, <8 x i1> %m)
@@ -7178,7 +7180,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB68_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -7189,9 +7191,9 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB68_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -7216,6 +7218,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB68_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -7231,7 +7234,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -7242,9 +7245,9 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB68_7
; RV64ZVE32F-NEXT: .LBB68_14: # %cond.store7
@@ -7254,6 +7257,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB68_8
@@ -7264,6 +7268,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB68_11
@@ -7275,6 +7280,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i8> %idxs
@@ -7310,7 +7316,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB69_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -7321,9 +7327,9 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB69_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -7348,6 +7354,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB69_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -7363,7 +7370,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -7374,9 +7381,9 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB69_7
; RV64ZVE32F-NEXT: .LBB69_14: # %cond.store7
@@ -7386,6 +7393,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB69_8
@@ -7396,6 +7404,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB69_11
@@ -7407,6 +7416,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i32>
@@ -7444,7 +7454,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB70_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -7456,9 +7466,9 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB70_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -7484,6 +7494,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB70_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -7500,7 +7511,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -7512,9 +7523,9 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB70_7
; RV64ZVE32F-NEXT: .LBB70_14: # %cond.store7
@@ -7525,6 +7536,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB70_8
@@ -7536,6 +7548,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB70_11
@@ -7548,6 +7561,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i32>
@@ -7585,7 +7599,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB71_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -7596,9 +7610,9 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB71_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -7623,6 +7637,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB71_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -7638,7 +7653,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -7649,9 +7664,9 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB71_7
; RV64ZVE32F-NEXT: .LBB71_14: # %cond.store7
@@ -7661,6 +7676,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB71_8
@@ -7671,6 +7687,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB71_11
@@ -7682,6 +7699,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i16> %idxs
@@ -7718,7 +7736,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: .LBB72_2: # %else
; RV64ZVE32F-NEXT: andi a2, a1, 2
@@ -7729,9 +7747,9 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB72_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -7756,6 +7774,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB72_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -7771,7 +7790,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -7782,9 +7801,9 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB72_7
; RV64ZVE32F-NEXT: .LBB72_14: # %cond.store7
@@ -7794,6 +7813,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB72_8
@@ -7804,6 +7824,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB72_11
@@ -7815,6 +7836,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i32>
@@ -7855,7 +7877,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a3)
; RV64ZVE32F-NEXT: .LBB73_2: # %else
; RV64ZVE32F-NEXT: andi a3, a2, 2
@@ -7867,9 +7889,9 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v12, (a3)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 1
+; RV64ZVE32F-NEXT: vse32.v v11, (a3)
; RV64ZVE32F-NEXT: .LBB73_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
@@ -7895,6 +7917,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a3, a0, a3
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: .LBB73_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -7911,7 +7934,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 8
@@ -7923,9 +7946,9 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v12, (a3)
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
+; RV64ZVE32F-NEXT: vse32.v v10, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 16
; RV64ZVE32F-NEXT: beqz a3, .LBB73_7
; RV64ZVE32F-NEXT: .LBB73_14: # %cond.store7
@@ -7936,6 +7959,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a3, a0, a3
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 32
; RV64ZVE32F-NEXT: bnez a3, .LBB73_8
@@ -7947,6 +7971,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a3, a0, a3
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a2, a2, -128
; RV64ZVE32F-NEXT: beqz a2, .LBB73_11
@@ -7959,6 +7984,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i32>
@@ -7991,7 +8017,7 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: andi a2, a1, 1
; RV64ZVE32F-NEXT: beqz a2, .LBB74_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -8005,7 +8031,6 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB74_4: # %else2
@@ -8032,6 +8057,7 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB74_9: # %else10
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
@@ -8047,9 +8073,9 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 2
-; RV64ZVE32F-NEXT: vse32.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vse32.v v11, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB74_6
; RV64ZVE32F-NEXT: .LBB74_13: # %cond.store5
@@ -8058,7 +8084,6 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
@@ -8069,6 +8094,7 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
; RV64ZVE32F-NEXT: bnez a2, .LBB74_8
@@ -8079,6 +8105,7 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a1, a1, -128
; RV64ZVE32F-NEXT: beqz a1, .LBB74_11
@@ -8090,6 +8117,7 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <8 x i32> %idxs
@@ -8449,27 +8477,27 @@ define void @mscatter_v8f64(<8 x double> %val, <8 x ptr> %ptrs, <8 x i1> %m) {
; RV32ZVE32F-NEXT: .LBB80_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB80_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB80_2
; RV32ZVE32F-NEXT: .LBB80_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB80_3
; RV32ZVE32F-NEXT: .LBB80_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB80_4
; RV32ZVE32F-NEXT: .LBB80_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -8627,27 +8655,27 @@ define void @mscatter_baseidx_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x i8>
; RV32ZVE32F-NEXT: .LBB81_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB81_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB81_2
; RV32ZVE32F-NEXT: .LBB81_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB81_3
; RV32ZVE32F-NEXT: .LBB81_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB81_4
; RV32ZVE32F-NEXT: .LBB81_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -8827,27 +8855,27 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV32ZVE32F-NEXT: .LBB82_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB82_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB82_2
; RV32ZVE32F-NEXT: .LBB82_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB82_3
; RV32ZVE32F-NEXT: .LBB82_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB82_4
; RV32ZVE32F-NEXT: .LBB82_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -9028,27 +9056,27 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV32ZVE32F-NEXT: .LBB83_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB83_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB83_2
; RV32ZVE32F-NEXT: .LBB83_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB83_3
; RV32ZVE32F-NEXT: .LBB83_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB83_4
; RV32ZVE32F-NEXT: .LBB83_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -9237,27 +9265,27 @@ define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, ptr %base, <8 x i16
; RV32ZVE32F-NEXT: .LBB84_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB84_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB84_2
; RV32ZVE32F-NEXT: .LBB84_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB84_3
; RV32ZVE32F-NEXT: .LBB84_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB84_4
; RV32ZVE32F-NEXT: .LBB84_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -9438,27 +9466,27 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: .LBB85_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB85_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB85_2
; RV32ZVE32F-NEXT: .LBB85_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB85_3
; RV32ZVE32F-NEXT: .LBB85_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB85_4
; RV32ZVE32F-NEXT: .LBB85_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -9640,27 +9668,27 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: .LBB86_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB86_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB86_2
; RV32ZVE32F-NEXT: .LBB86_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB86_3
; RV32ZVE32F-NEXT: .LBB86_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB86_4
; RV32ZVE32F-NEXT: .LBB86_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -9850,27 +9878,27 @@ define void @mscatter_baseidx_v8i32_v8f64(<8 x double> %val, ptr %base, <8 x i32
; RV32ZVE32F-NEXT: .LBB87_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB87_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB87_2
; RV32ZVE32F-NEXT: .LBB87_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB87_3
; RV32ZVE32F-NEXT: .LBB87_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB87_4
; RV32ZVE32F-NEXT: .LBB87_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -10049,27 +10077,27 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: .LBB88_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB88_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB88_2
; RV32ZVE32F-NEXT: .LBB88_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB88_3
; RV32ZVE32F-NEXT: .LBB88_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB88_4
; RV32ZVE32F-NEXT: .LBB88_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -10249,27 +10277,27 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: .LBB89_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB89_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB89_2
; RV32ZVE32F-NEXT: .LBB89_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB89_3
; RV32ZVE32F-NEXT: .LBB89_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB89_4
; RV32ZVE32F-NEXT: .LBB89_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -10473,27 +10501,27 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, ptr %base, <8 x i64> %idx
; RV32ZVE32F-NEXT: .LBB90_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB90_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 2
; RV32ZVE32F-NEXT: beqz a1, .LBB90_2
; RV32ZVE32F-NEXT: .LBB90_10: # %cond.store1
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 4
; RV32ZVE32F-NEXT: beqz a1, .LBB90_3
; RV32ZVE32F-NEXT: .LBB90_11: # %cond.store3
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
; RV32ZVE32F-NEXT: andi a1, a0, 8
; RV32ZVE32F-NEXT: beqz a1, .LBB90_4
; RV32ZVE32F-NEXT: .LBB90_12: # %cond.store5
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a1, v10
; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
@@ -10874,7 +10902,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 1
; RV64ZVE32F-NEXT: beqz a2, .LBB92_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vse8.v v8, (a2)
@@ -10886,7 +10914,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_4: # %else2
@@ -10903,11 +10931,11 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB92_8
; RV64ZVE32F-NEXT: .LBB92_7: # %cond.store7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 4
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_8: # %else8
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -10918,7 +10946,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v14, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v14
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 5
; RV64ZVE32F-NEXT: vse8.v v14, (a2)
; RV64ZVE32F-NEXT: .LBB92_10: # %else10
@@ -10940,9 +10968,9 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 9
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 9
+; RV64ZVE32F-NEXT: vse8.v v13, (a2)
; RV64ZVE32F-NEXT: .LBB92_15: # %else18
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 4
@@ -10953,7 +10981,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: # %bb.16: # %cond.store19
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 10
; RV64ZVE32F-NEXT: vse8.v v14, (a2)
; RV64ZVE32F-NEXT: .LBB92_17: # %else20
@@ -10964,9 +10992,9 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 11
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 11
+; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_19: # %else22
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 51
@@ -10975,9 +11003,9 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: # %bb.20: # %cond.store23
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 12
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 12
+; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB92_21: # %else24
; RV64ZVE32F-NEXT: slli a2, a1, 50
; RV64ZVE32F-NEXT: bgez a2, .LBB92_23
@@ -10986,9 +11014,9 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 13
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 13
+; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB92_23: # %else26
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
@@ -11117,7 +11145,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_49: # %cond.store3
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 2
; RV64ZVE32F-NEXT: vse8.v v14, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
@@ -11127,16 +11155,16 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 3
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3
+; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: bnez a2, .LBB92_7
; RV64ZVE32F-NEXT: j .LBB92_8
; RV64ZVE32F-NEXT: .LBB92_51: # %cond.store11
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 6
; RV64ZVE32F-NEXT: vse8.v v14, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 128
@@ -11146,24 +11174,24 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 7
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 7
+; RV64ZVE32F-NEXT: vse8.v v13, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 256
; RV64ZVE32F-NEXT: beqz a2, .LBB92_13
; RV64ZVE32F-NEXT: .LBB92_53: # %cond.store15
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 8
-; RV64ZVE32F-NEXT: vse8.v v14, (a2)
+; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 8
+; RV64ZVE32F-NEXT: vse8.v v13, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 512
; RV64ZVE32F-NEXT: bnez a2, .LBB92_14
; RV64ZVE32F-NEXT: j .LBB92_15
; RV64ZVE32F-NEXT: .LBB92_54: # %cond.store27
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 14
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 48
@@ -11173,9 +11201,9 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 15
-; RV64ZVE32F-NEXT: vse8.v v12, (a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 15
+; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: slli a2, a1, 47
; RV64ZVE32F-NEXT: bgez a2, .LBB92_26
; RV64ZVE32F-NEXT: .LBB92_56: # %cond.store31
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index c9421c0c7258f6..ed1c18de5b1076 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -6582,7 +6582,7 @@ define i64 @vreduce_mul_v32i64(ptr %x) {
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vrgather.vi v16, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vmv.x.s a1, v8
@@ -6634,7 +6634,7 @@ define i64 @vreduce_mul_v64i64(ptr %x) nounwind {
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vrgather.vi v16, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vmv.x.s a1, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 4a0e156326bbd7..6ec3576ec27724 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -470,41 +470,40 @@ declare <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr, i32,
define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_v33f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: li a3, 32
+; CHECK-RV32-NEXT: li a5, 32
; CHECK-RV32-NEXT: vmv1r.v v8, v0
-; CHECK-RV32-NEXT: mv a5, a4
-; CHECK-RV32-NEXT: bltu a4, a3, .LBB35_2
+; CHECK-RV32-NEXT: mv a3, a4
+; CHECK-RV32-NEXT: bltu a4, a5, .LBB35_2
; CHECK-RV32-NEXT: # %bb.1:
-; CHECK-RV32-NEXT: li a5, 32
+; CHECK-RV32-NEXT: li a3, 32
; CHECK-RV32-NEXT: .LBB35_2:
-; CHECK-RV32-NEXT: addi a3, a5, -16
-; CHECK-RV32-NEXT: sltu a6, a5, a3
-; CHECK-RV32-NEXT: addi a7, a6, -1
-; CHECK-RV32-NEXT: li a6, 16
-; CHECK-RV32-NEXT: and a7, a7, a3
-; CHECK-RV32-NEXT: mv a3, a5
-; CHECK-RV32-NEXT: bltu a5, a6, .LBB35_4
+; CHECK-RV32-NEXT: mul a5, a3, a2
+; CHECK-RV32-NEXT: addi a6, a4, -32
+; CHECK-RV32-NEXT: sltu a4, a4, a6
+; CHECK-RV32-NEXT: addi a4, a4, -1
+; CHECK-RV32-NEXT: and a6, a4, a6
+; CHECK-RV32-NEXT: li a4, 16
+; CHECK-RV32-NEXT: add a5, a1, a5
+; CHECK-RV32-NEXT: bltu a6, a4, .LBB35_4
; CHECK-RV32-NEXT: # %bb.3:
-; CHECK-RV32-NEXT: li a3, 16
+; CHECK-RV32-NEXT: li a6, 16
; CHECK-RV32-NEXT: .LBB35_4:
-; CHECK-RV32-NEXT: mul t0, a3, a2
-; CHECK-RV32-NEXT: add t0, a1, t0
-; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2
-; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v16, (t0), a2, v0.t
-; CHECK-RV32-NEXT: mul a7, a5, a2
-; CHECK-RV32-NEXT: addi a5, a4, -32
-; CHECK-RV32-NEXT: sltu a4, a4, a5
-; CHECK-RV32-NEXT: addi a4, a4, -1
-; CHECK-RV32-NEXT: and a5, a4, a5
-; CHECK-RV32-NEXT: add a4, a1, a7
-; CHECK-RV32-NEXT: bltu a5, a6, .LBB35_6
-; CHECK-RV32-NEXT: # %bb.5:
-; CHECK-RV32-NEXT: li a5, 16
-; CHECK-RV32-NEXT: .LBB35_6:
; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4
+; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v16, (a5), a2, v0.t
+; CHECK-RV32-NEXT: addi a5, a3, -16
+; CHECK-RV32-NEXT: sltu a6, a3, a5
+; CHECK-RV32-NEXT: addi a6, a6, -1
+; CHECK-RV32-NEXT: and a5, a6, a5
+; CHECK-RV32-NEXT: bltu a3, a4, .LBB35_6
+; CHECK-RV32-NEXT: # %bb.5:
+; CHECK-RV32-NEXT: li a3, 16
+; CHECK-RV32-NEXT: .LBB35_6:
+; CHECK-RV32-NEXT: mul a4, a3, a2
+; CHECK-RV32-NEXT: add a4, a1, a4
+; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2
; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a4), a2, v0.t
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
@@ -512,51 +511,49 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-RV32-NEXT: vse64.v v8, (a0)
-; CHECK-RV32-NEXT: addi a1, a0, 256
-; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-RV32-NEXT: addi a1, a0, 128
; CHECK-RV32-NEXT: vse64.v v24, (a1)
-; CHECK-RV32-NEXT: addi a0, a0, 128
-; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-RV32-NEXT: addi a0, a0, 256
+; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-RV32-NEXT: vse64.v v16, (a0)
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_load_v33f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: li a4, 32
+; CHECK-RV64-NEXT: li a5, 32
; CHECK-RV64-NEXT: vmv1r.v v8, v0
-; CHECK-RV64-NEXT: mv a5, a3
-; CHECK-RV64-NEXT: bltu a3, a4, .LBB35_2
+; CHECK-RV64-NEXT: mv a4, a3
+; CHECK-RV64-NEXT: bltu a3, a5, .LBB35_2
; CHECK-RV64-NEXT: # %bb.1:
-; CHECK-RV64-NEXT: li a5, 32
+; CHECK-RV64-NEXT: li a4, 32
; CHECK-RV64-NEXT: .LBB35_2:
-; CHECK-RV64-NEXT: addi a4, a5, -16
-; CHECK-RV64-NEXT: sltu a6, a5, a4
-; CHECK-RV64-NEXT: addi a7, a6, -1
-; CHECK-RV64-NEXT: li a6, 16
-; CHECK-RV64-NEXT: and a7, a7, a4
-; CHECK-RV64-NEXT: mv a4, a5
-; CHECK-RV64-NEXT: bltu a5, a6, .LBB35_4
+; CHECK-RV64-NEXT: mul a5, a4, a2
+; CHECK-RV64-NEXT: addi a6, a3, -32
+; CHECK-RV64-NEXT: sltu a3, a3, a6
+; CHECK-RV64-NEXT: addi a3, a3, -1
+; CHECK-RV64-NEXT: and a6, a3, a6
+; CHECK-RV64-NEXT: li a3, 16
+; CHECK-RV64-NEXT: add a5, a1, a5
+; CHECK-RV64-NEXT: bltu a6, a3, .LBB35_4
; CHECK-RV64-NEXT: # %bb.3:
-; CHECK-RV64-NEXT: li a4, 16
+; CHECK-RV64-NEXT: li a6, 16
; CHECK-RV64-NEXT: .LBB35_4:
-; CHECK-RV64-NEXT: mul t0, a4, a2
-; CHECK-RV64-NEXT: add t0, a1, t0
-; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2
-; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, ma
-; CHECK-RV64-NEXT: vlse64.v v16, (t0), a2, v0.t
-; CHECK-RV64-NEXT: mul a7, a5, a2
-; CHECK-RV64-NEXT: addi a5, a3, -32
-; CHECK-RV64-NEXT: sltu a3, a3, a5
-; CHECK-RV64-NEXT: addi a3, a3, -1
-; CHECK-RV64-NEXT: and a5, a3, a5
-; CHECK-RV64-NEXT: add a3, a1, a7
-; CHECK-RV64-NEXT: bltu a5, a6, .LBB35_6
-; CHECK-RV64-NEXT: # %bb.5:
-; CHECK-RV64-NEXT: li a5, 16
-; CHECK-RV64-NEXT: .LBB35_6:
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4
+; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, ma
+; CHECK-RV64-NEXT: vlse64.v v16, (a5), a2, v0.t
+; CHECK-RV64-NEXT: addi a5, a4, -16
+; CHECK-RV64-NEXT: sltu a6, a4, a5
+; CHECK-RV64-NEXT: addi a6, a6, -1
+; CHECK-RV64-NEXT: and a5, a6, a5
+; CHECK-RV64-NEXT: bltu a4, a3, .LBB35_6
+; CHECK-RV64-NEXT: # %bb.5:
+; CHECK-RV64-NEXT: li a4, 16
+; CHECK-RV64-NEXT: .LBB35_6:
+; CHECK-RV64-NEXT: mul a3, a4, a2
+; CHECK-RV64-NEXT: add a3, a1, a3
+; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2
; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a3), a2, v0.t
; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma
@@ -564,11 +561,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-RV64-NEXT: vse64.v v8, (a0)
-; CHECK-RV64-NEXT: addi a1, a0, 256
-; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-RV64-NEXT: addi a1, a0, 128
; CHECK-RV64-NEXT: vse64.v v24, (a1)
-; CHECK-RV64-NEXT: addi a0, a0, 128
-; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-RV64-NEXT: addi a0, a0, 256
+; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-RV64-NEXT: vse64.v v16, (a0)
; CHECK-RV64-NEXT: ret
%v = call <33 x double> @llvm.experimental.vp.strided.load.v33f64.p0.i64(ptr %ptr, i64 %stride, <33 x i1> %mask, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 8f4828a85e06ed..4442b78884bf53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -343,23 +343,23 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %m)
; RV64-SLOW-NEXT: .LBB6_5: # %cond.store
; RV64-SLOW-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64-SLOW-NEXT: vmv.x.s a1, v8
-; RV64-SLOW-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-SLOW-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-SLOW-NEXT: vmv.x.s a2, v10
+; RV64-SLOW-NEXT: srli a3, a1, 8
+; RV64-SLOW-NEXT: sb a3, 1(a2)
; RV64-SLOW-NEXT: sb a1, 0(a2)
-; RV64-SLOW-NEXT: srli a1, a1, 8
-; RV64-SLOW-NEXT: sb a1, 1(a2)
; RV64-SLOW-NEXT: andi a1, a0, 2
; RV64-SLOW-NEXT: beqz a1, .LBB6_2
; RV64-SLOW-NEXT: .LBB6_6: # %cond.store1
; RV64-SLOW-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64-SLOW-NEXT: vslidedown.vi v9, v8, 1
; RV64-SLOW-NEXT: vmv.x.s a1, v9
-; RV64-SLOW-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; RV64-SLOW-NEXT: vslidedown.vi v12, v10, 1
-; RV64-SLOW-NEXT: vmv.x.s a2, v12
+; RV64-SLOW-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-SLOW-NEXT: vslidedown.vi v9, v10, 1
+; RV64-SLOW-NEXT: vmv.x.s a2, v9
+; RV64-SLOW-NEXT: srli a3, a1, 8
+; RV64-SLOW-NEXT: sb a3, 1(a2)
; RV64-SLOW-NEXT: sb a1, 0(a2)
-; RV64-SLOW-NEXT: srli a1, a1, 8
-; RV64-SLOW-NEXT: sb a1, 1(a2)
; RV64-SLOW-NEXT: andi a1, a0, 4
; RV64-SLOW-NEXT: beqz a1, .LBB6_3
; RV64-SLOW-NEXT: .LBB6_7: # %cond.store3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index e36d1286efb0ad..bb75ad2cbd7e5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -449,12 +449,11 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v8, (a0)
-; CHECK-NEXT: addi a1, a0, 256
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v24, (a1)
-; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v16, (a0)
+; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vse64.v v16, (a1)
+; CHECK-NEXT: addi a0, a0, 256
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v24, (a0)
; CHECK-NEXT: ret
%load = call <33 x double> @llvm.vp.load.v33f64.p0(ptr %ptr, <33 x i1> %m, i32 %evl)
ret <33 x double> %load
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
index 4fbc4b4bed9606..47b4b612311222 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
@@ -46,7 +46,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half>)
define half @intrinsic_vfmv.f.s_s_nxv8f16(<vscale x 8 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -59,7 +59,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half>)
define half @intrinsic_vfmv.f.s_s_nxv16f16(<vscale x 16 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -72,7 +72,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half>)
define half @intrinsic_vfmv.f.s_s_nxv32f16(<vscale x 32 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -111,7 +111,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float>)
define float @intrinsic_vfmv.f.s_s_nxv4f32(<vscale x 4 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -124,7 +124,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float>)
define float @intrinsic_vfmv.f.s_s_nxv8f32(<vscale x 8 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -137,7 +137,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float>)
define float @intrinsic_vfmv.f.s_s_nxv16f32(<vscale x 16 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -163,7 +163,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double>)
define double @intrinsic_vfmv.f.s_s_nxv2f64(<vscale x 2 x double> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -176,7 +176,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double>)
define double @intrinsic_vfmv.f.s_s_nxv4f64(<vscale x 4 x double> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -189,7 +189,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)
define double @intrinsic_vfmv.f.s_s_nxv8f64(<vscale x 8 x double> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index ca71e83333fbfd..4ef28252b2a9d8 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -681,32 +681,34 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: vmsne.vv v0, v8, v14
; RV32MV-NEXT: vmv.v.i v8, 0
; RV32MV-NEXT: vmerge.vim v8, v8, -1, v0
-; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+; RV32MV-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32MV-NEXT: vse32.v v8, (s0)
-; RV32MV-NEXT: vslidedown.vi v10, v8, 1
-; RV32MV-NEXT: vmv.x.s a0, v10
-; RV32MV-NEXT: vslidedown.vi v10, v8, 2
-; RV32MV-NEXT: vmv.x.s a1, v10
-; RV32MV-NEXT: slli a2, a1, 1
-; RV32MV-NEXT: sub a2, a2, a0
-; RV32MV-NEXT: sw a2, 4(s0)
+; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32MV-NEXT: vslidedown.vi v10, v8, 4
; RV32MV-NEXT: vmv.x.s a0, v10
-; RV32MV-NEXT: srli a2, a0, 30
+; RV32MV-NEXT: srli a1, a0, 30
; RV32MV-NEXT: vslidedown.vi v10, v8, 5
-; RV32MV-NEXT: vmv.x.s a3, v10
-; RV32MV-NEXT: slli a3, a3, 2
-; RV32MV-NEXT: or a2, a3, a2
-; RV32MV-NEXT: andi a2, a2, 7
-; RV32MV-NEXT: sb a2, 12(s0)
-; RV32MV-NEXT: srli a1, a1, 31
+; RV32MV-NEXT: vmv.x.s a2, v10
+; RV32MV-NEXT: slli a2, a2, 2
+; RV32MV-NEXT: or a1, a2, a1
+; RV32MV-NEXT: andi a1, a1, 7
+; RV32MV-NEXT: sb a1, 12(s0)
+; RV32MV-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32MV-NEXT: vslidedown.vi v9, v8, 1
+; RV32MV-NEXT: vmv.x.s a1, v9
+; RV32MV-NEXT: vslidedown.vi v9, v8, 2
+; RV32MV-NEXT: vmv.x.s a2, v9
+; RV32MV-NEXT: slli a3, a2, 1
+; RV32MV-NEXT: sub a3, a3, a1
+; RV32MV-NEXT: sw a3, 4(s0)
+; RV32MV-NEXT: srli a2, a2, 31
; RV32MV-NEXT: vslidedown.vi v8, v8, 3
-; RV32MV-NEXT: vmv.x.s a2, v8
-; RV32MV-NEXT: andi a2, a2, 1
-; RV32MV-NEXT: slli a2, a2, 1
+; RV32MV-NEXT: vmv.x.s a1, v8
+; RV32MV-NEXT: andi a1, a1, 1
+; RV32MV-NEXT: slli a1, a1, 1
; RV32MV-NEXT: slli a0, a0, 2
-; RV32MV-NEXT: or a0, a1, a0
-; RV32MV-NEXT: or a0, a0, a2
+; RV32MV-NEXT: or a0, a2, a0
+; RV32MV-NEXT: or a0, a0, a1
; RV32MV-NEXT: sw a0, 8(s0)
; RV32MV-NEXT: csrr a0, vlenb
; RV32MV-NEXT: slli a0, a0, 1
@@ -783,6 +785,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: sb a3, 12(a0)
; RV64MV-NEXT: vmv.x.s a3, v8
; RV64MV-NEXT: and a1, a3, a1
+; RV64MV-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64MV-NEXT: vslidedown.vi v8, v8, 1
; RV64MV-NEXT: vmv.x.s a3, v8
; RV64MV-NEXT: slli a4, a3, 33
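For readers skimming the test churn above, here is a minimal standalone example of the pattern these updates reflect; it is not taken from this commit, and the function name and the exact output shown are illustrative assumptions. An extract at a small known index from an LMUL=8 vector can now be performed at LMUL=1, since the index is guaranteed to land in the first register of the group.

; Hypothetical input, compiled with something like:
;   llc -mtriple=riscv64 -mattr=+v
define i64 @extract_elt1_nxv8i64(<vscale x 8 x i64> %v) {
  ; Index 1 always fits within the minimum VLMAX of a single e64 register
  ; (VLEN >= 128 with +v), so the slide and vmv.x.s are expected to run at
  ; m1 rather than m8.
  %e = extractelement <vscale x 8 x i64> %v, i32 1
  ret i64 %e
}
; Expected (illustrative) lowering after this change:
;   vsetivli zero, 1, e64, m1, ta, ma
;   vslidedown.vi v8, v8, 1
;   vmv.x.s a0, v8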