[llvm] [RISCV] Don't use `x0, x0` form to retain SEW/LMUL when AVL is imm (PR #169307)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 24 02:55:37 PST 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-risc-v
Author: Pengcheng Wang (wangpc-pp)
<details>
<summary>Changes</summary>
Usually, the `vl` value of the `vsetivli` form can be determined early,
in the decode stage of the CPU pipeline, whereas for the `x0, x0` form
of `vsetvli` the value of `vl` is not known until later stages.
So, when the AVL is an immediate and the SEW/LMUL ratio doesn't change,
we can replace the `x0, x0` form with the `vsetivli` form.
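
For instance, in the `double_reduct.ll` change below, the AVL is the
immediate 16 and both vtype settings keep the same SEW/LMUL ratio
(e8/m1 and e16/m2 are both 8), so the pass can now emit (a sketch of
just the relevant instructions from that test):

```
vsetivli zero, 16, e8, m1, ta, ma      # vl = 16, SEW/LMUL ratio 8
vwaddu.vv v10, v8, v9
# before: vsetvli  zero, zero, e16, m2, ta, ma  (vl preserved, resolved late)
# after:  vsetivli zero, 16, e16, m2, ta, ma    (same vl, known at decode)
vmv.s.x v8, zero
vredsum.vs v8, v10, v8
```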
---
Patch is 948.11 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/169307.diff
120 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp (+47-25)
- (modified) llvm/test/CodeGen/RISCV/double_reduct.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/pr94265.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/compressstore.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/constant-folding.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/expandload.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll (+36-36)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll (+36-36)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll (+32-32)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximumnum.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll (+32-32)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimumnum.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll (+28-28)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll (+56-56)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll (+297-297)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll (+36-36)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll (+41-41)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll (+36-36)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll (+41-41)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll (+36-36)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll (+16-25)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll (+26-26)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll (+9-13)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll (+38-38)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll (+20-20)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll (+447-648)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll (+246-264)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll (+39-39)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll (+11-11)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll (+64-64)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-bf16.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll (+28-28)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll (+7-7)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave2.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp-interleave.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int-interleave.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int.ll (+13-13)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll (+40-40)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll (+35-35)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll (+42-42)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll (+40-40)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmaccbf16.ll (+13-13)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-fp-vp-bf16.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll (+19-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd-mask.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll (+31-31)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub-mask.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll (+7-7)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zvqdotq.ll (+34-35)
- (modified) llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/pr141907.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/pr165232.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/pr63459.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/regcoal-liveinterval-pruning-crash.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/trunc-select-to-max-usat.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll (+13-14)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vl-opt.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll (+5-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir (+2-2)
- (modified) llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll (+8-8)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index e5819d90526d9..cf372baa597cc 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -47,6 +47,10 @@ static cl::opt<bool> EnsureWholeVectorRegisterMoveValidVTYPE(
"vill is cleared"),
cl::init(true));
+static cl::opt<bool> UseImm("use-imm-vset", cl::Hidden,
+ cl::desc("Use vsetivli to replace x0,x0 form."),
+ cl::init(true));
+
namespace {
/// Given a virtual register \p Reg, return the corresponding VNInfo for it.
@@ -1167,7 +1171,13 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
// Use X0, X0 form if the AVL is the same and the SEW+LMUL gives the same
// VLMAX.
if (Info.hasSameAVL(PrevInfo) && Info.hasSameVLMAX(PrevInfo)) {
- auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0X0))
+ auto MI =
+ Info.hasAVLImm() && UseImm
+ ? BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETIVLI))
+ .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+ .addImm(PrevInfo.getAVLImm())
+ .addImm(Info.encodeVTYPE())
+ : BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0X0))
.addReg(RISCV::X0, RegState::Define | RegState::Dead)
.addReg(RISCV::X0, RegState::Kill)
.addImm(Info.encodeVTYPE())
@@ -1734,35 +1744,47 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
// demanded, or b) we can't rewrite the former to be the later for
// implementation reasons.
if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
- if (Used.VLAny)
- return false;
-
- if (Used.VLZeroness) {
- if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
- return false;
- if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
- LIS))
- return false;
+ bool Skip = false;
+ if (MI.getOpcode() == RISCV::PseudoVSETIVLI &&
+ PrevMI.getOpcode() == RISCV::PseudoVSETIVLI &&
+ PrevMI.getOperand(0).isDead()) {
+ VSETVLIInfo PrevInfo = getInfoForVSETVLI(PrevMI);
+ VSETVLIInfo Info = getInfoForVSETVLI(MI);
+ if (PrevInfo.hasAVLImm() && Info.hasAVLImm() &&
+ PrevInfo.getAVLImm() == Info.getAVLImm())
+ Skip = true;
}
+ if (!Skip) {
+ if (Used.VLAny)
+ return false;
+
+ if (Used.VLZeroness) {
+ if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
+ return false;
+ if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
+ LIS))
+ return false;
+ }
- auto &AVL = MI.getOperand(1);
+ auto &AVL = MI.getOperand(1);
- // If the AVL is a register, we need to make sure its definition is the same
- // at PrevMI as it was at MI.
- if (AVL.isReg() && AVL.getReg() != RISCV::X0) {
- VNInfo *VNI = getVNInfoFromReg(AVL.getReg(), MI, LIS);
- VNInfo *PrevVNI = getVNInfoFromReg(AVL.getReg(), PrevMI, LIS);
- if (!VNI || !PrevVNI || VNI != PrevVNI)
+ // If the AVL is a register, we need to make sure its definition is the
+ // same at PrevMI as it was at MI.
+ if (AVL.isReg() && AVL.getReg() != RISCV::X0) {
+ VNInfo *VNI = getVNInfoFromReg(AVL.getReg(), MI, LIS);
+ VNInfo *PrevVNI = getVNInfoFromReg(AVL.getReg(), PrevMI, LIS);
+ if (!VNI || !PrevVNI || VNI != PrevVNI)
+ return false;
+ }
+
+ // If we define VL and need to move the definition up, check we can extend
+ // the live interval upwards from MI to PrevMI.
+ Register VL = MI.getOperand(0).getReg();
+ if (VL.isVirtual() && LIS &&
+ LIS->getInterval(VL).overlaps(LIS->getInstructionIndex(PrevMI),
+ LIS->getInstructionIndex(MI)))
return false;
}
-
- // If we define VL and need to move the definition up, check we can extend
- // the live interval upwards from MI to PrevMI.
- Register VL = MI.getOperand(0).getReg();
- if (VL.isVirtual() && LIS &&
- LIS->getInterval(VL).overlaps(LIS->getInstructionIndex(PrevMI),
- LIS->getInstructionIndex(MI)))
- return false;
}
assert(PrevMI.getOperand(2).isImm() && MI.getOperand(2).isImm());
diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll
index cecdd77a079e4..f4a0ab227edf4 100644
--- a/llvm/test/CodeGen/RISCV/double_reduct.ll
+++ b/llvm/test/CodeGen/RISCV/double_reduct.ll
@@ -90,7 +90,7 @@ define i16 @add_ext_i16(<16 x i8> %a, <16 x i8> %b) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vwaddu.vv v10, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v8, zero
; CHECK-NEXT: vredsum.vs v8, v10, v8
; CHECK-NEXT: vmv.x.s a0, v8
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 85867a4ab2c6f..42277a4ff6556 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -57,11 +57,11 @@ define void @_Z3foov() {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v14, (a0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, m2, tu, mu
; CHECK-NEXT: vsext.vf2 v8, v14, v0.t
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44)
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v14, (a0)
; CHECK-NEXT: lui a0, %hi(var_47)
; CHECK-NEXT: addi a0, a0, %lo(var_47)
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
index 1216d3000e8c8..b01edf4e3f182 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
@@ -9,7 +9,7 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmsne.vi v0, v8, 0
-; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vmerge.vim v8, v8, -1, v0
; RV32-NEXT: vid.v v9
@@ -26,7 +26,7 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vmsne.vi v0, v8, 0
-; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vmerge.vim v8, v8, -1, v0
; RV64-NEXT: vid.v v9
diff --git a/llvm/test/CodeGen/RISCV/pr94265.ll b/llvm/test/CodeGen/RISCV/pr94265.ll
index f92cdb4ca7395..e97f924d6ae4e 100644
--- a/llvm/test/CodeGen/RISCV/pr94265.ll
+++ b/llvm/test/CodeGen/RISCV/pr94265.ll
@@ -11,7 +11,7 @@ define <8 x i16> @PR94265(<8 x i32> %a0) #0 {
; RV32I-NEXT: vsra.vi v10, v8, 31
; RV32I-NEXT: vsrl.vi v10, v10, 26
; RV32I-NEXT: vadd.vv v8, v8, v10
-; RV32I-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32I-NEXT: vnsrl.wi v10, v8, 6
; RV32I-NEXT: vsll.vi v8, v10, 10
; RV32I-NEXT: ret
@@ -22,7 +22,7 @@ define <8 x i16> @PR94265(<8 x i32> %a0) #0 {
; RV64I-NEXT: vsra.vi v10, v8, 31
; RV64I-NEXT: vsrl.vi v10, v10, 26
; RV64I-NEXT: vadd.vv v8, v8, v10
-; RV64I-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64I-NEXT: vnsrl.wi v10, v8, 6
; RV64I-NEXT: vsll.vi v8, v10, 10
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll
index 44fd9046fa0e0..1a162cc160da7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll
@@ -26,7 +26,7 @@ define void @foo(<vscale x 8 x i8> %0) {
; CHECK-NEXT: vsetivli zero, 0, e8, m1, tu, ma
; CHECK-NEXT: vslideup.vi v9, v10, 0
; CHECK-NEXT: vslideup.vi v8, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; CHECK-NEXT: vmv.x.s s1, v9
; CHECK-NEXT: vmv.x.s s2, v8
; CHECK-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index 69822e9d9d2e3..50c51bfe62caf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -797,7 +797,7 @@ define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data)
; RV64-NEXT: vse64.v v24, (a0)
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
-; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64-NEXT: vmv.x.s a1, v0
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vcompress.vm v8, v16, v24
@@ -819,7 +819,7 @@ define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data)
; RV32-NEXT: vse64.v v24, (a0)
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v24, v0, 2
-; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV32-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV32-NEXT: vmv.x.s a1, v0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vcompress.vm v8, v16, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll b/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
index 1343b64b876dc..6d60313bffc90 100644
--- a/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
@@ -10,7 +10,7 @@ define void @v4xi8_concat_vector_insert_idx0(ptr %a, ptr %b, i8 %x) {
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 2
@@ -50,7 +50,7 @@ define void @v4xi8_concat_vector_insert_idx2(ptr %a, ptr %b, i8 %x) {
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vle8.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v9, v8, 2
@@ -103,7 +103,7 @@ define void @v4xi64_concat_vector_insert_idx0(ptr %a, ptr %b, i64 %x) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vle64.v v10, (a1)
-; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; RV64-NEXT: vmv.s.x v8, a2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vslideup.vi v8, v10, 2
@@ -171,7 +171,7 @@ define void @v4xi64_concat_vector_insert_idx2(ptr %a, ptr %b, i64 %x) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: vle64.v v10, (a0)
-; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; RV64-NEXT: vmv.s.x v8, a2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vslideup.vi v10, v8, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
index 593f8e2612fec..86e9eb2a42ab3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -25,18 +25,18 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: vmv.v.x v11, a0
; RV32-NEXT: vmsne.vi v0, v11, 0
-; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
-; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vmv.v.i v9, 0
-; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.i v11, 10
; RV32-NEXT: vmv1r.v v0, v10
-; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: vrgather.vi v10, v9, 0
; RV32-NEXT: vmsne.vi v0, v10, 0
-; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vse32.v v11, (a0), v0.t
; RV32-NEXT: ret
@@ -50,18 +50,18 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: vmv.v.x v13, a0
; RV64-NEXT: vmsne.vi v0, v13, 0
-; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmerge.vvm v8, v10, v8, v0
-; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT: vmv.v.i v9, 0
-; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vmv.v.i v10, 10
; RV64-NEXT: vmv1r.v v0, v12
-; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: vrgather.vi v11, v9, 0
; RV64-NEXT: vmsne.vi v0, v11, 0
-; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vse32.v v10, (a0), v0.t
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
index 05747ff0d049a..68f5a0b580c3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -18,7 +18,7 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
; CHECK-NEXT: vsrl.vi v8, v8, 16
; CHECK-NEXT: lui a0, 1048568
; CHECK-NEXT: vand.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v43 = lshr <2 x i32> %x, splat (i32 16)
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index cc1282a9119da..76e1692bd2acc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -657,7 +657,7 @@ define <128 x i16> @test_expandload_v128i16(ptr %base, <128 x i1> %mask, <128 x
; CHECK-RV32-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v7, v0, 8
; CHECK-RV32-NEXT: li a2, 32
-; CHECK-RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-RV32-NEXT: vsetivli zero, 8, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv.x.s a3, v0
; CHECK-RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-RV32-NEXT: vcpop.m a4, v0
@@ -1091,7 +1091,7 @@ define <64 x i32> @test_expandload_v64i32(ptr %base, <64 x i1> %mask, <64 x i32>
; CHECK-RV64-NEXT: li a1, 32
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v7, v0, 4
-; CHECK-RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-RV64-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; CHECK-RV64-NEXT: vmv.x.s a2, v0
; CHECK-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV64-NEXT: vcpop.m a3, v0
@@ -3907,7 +3907,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_544: # %cond.load
; CHECK-RV32-NEXT: lbu a1, 0(a0)
; CHECK-RV32-NEXT: vmv8r.v v16, v8
-; CHECK-RV32-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
+; CHECK-RV32-NEXT: vsetivli zero, 1, e8, mf8, tu, ma
; CHECK-RV32-NEXT: vmv.s.x v8, a1
; CHECK-RV32-NEXT: addi a0, a0, 1
; CHECK-RV32-NEXT: vmv1r.v v16, v8
@@ -12760,7 +12760,7 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV64-NEXT: .LBB61_527: # %cond.load
; CHECK-RV64-NEXT: lbu a1, 0(a0)
; CHECK-RV64-NEXT: vmv8r.v v16, v8
-; CHECK-RV64-NEXT: vsetvli zero, zero, e8, mf8, tu, ma
+; CHECK-RV64-NEXT: vsetivli zero, 1, e8, mf8, tu, ma
; CHECK-RV64-NEXT: vmv.s.x v8, a1
; CHECK-RV64-NEXT: addi a0, a0, 1
; CHECK-RV64-NEXT: vmv1r.v v16, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 903c0dcaba2d8..b8aceb5cc3db3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -338,7 +338,7 @@ define bfloat @extractelt_nxv8bf16_idx(<vscale x 8 x bfloat> %v, i32 zeroext %id
; ZVFBFA: # %bb.0:
; ZVFBFA-NEXT: vsetivli zero, 1, e16, m2, ta, ma
; ZVFBFA-NEXT: vslidedown.vx v8, v8, a0
-; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vsetivli zero, 1, e16alt, m2, ta, ma
; ZVFBFA-NEXT: vfmv.f.s fa0, v8
; ZVFBFA-NEXT: ret
%r = extractelement <vscale x 8 x bfloat> %v, i32 %idx
@@ -423,7 +423,7 @@ define bfloat @extractelt_nxv16bf16_idx(<vscale x 16 x bfloat> %v, i32 zeroext %
; ZVFBFA: # %bb.0:
; ZVFBFA-NEXT: vsetivli zero, 1, e16, m4, ta, ma
; ZVFBFA-NEXT: vslidedown.vx v8, v8, a0
-; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vsetivli zero, 1, e16alt, m4, ta, ma
; ZVFBFA-NEXT: vfmv.f.s fa0, v8
; ZVFBFA-NEXT: ret
%r = extractelement <vscale x 16 x bfloat> %v, i32 %idx
@@ -508,7 +508,7 @@ define bfloat @extractelt_nxv32bf16_idx(<vscale x 32 x bfloat> %v, i32 zeroext %
; ZVFBFA: # %bb.0:
; ZVFBFA-NEXT: vsetivli zero, 1, e16, m8, ta, ma
; ZVFBFA-NEXT: vslidedown.vx v8, v8, a0
-; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT: vsetivli zero, 1, e16alt, m8, ta, ma
; ZVFBFA-NEXT: vfmv.f.s fa0, v8
; ZVFBFA-NEXT: ret
%r = extractelement <vscale x 32 x bfloat> %v, i32 %idx
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index bd1209a17b534..b8ff35c2c1ac2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -81,7 +81,7 @@ define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
; CHECK-NEXT: vmin.vv v10, v8, v9
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v9, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%a.sext = sext <4 x i8> %a to <4 x i16>
@@ -116,7 +116,7 @@ define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vmin.vv v10, v8, v9
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v9, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%a.sext = sext <8 x i8> %a to <8 x i16>
@@ -151,7 +151,7 @@ define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
; CHECK-NEXT: vmin.vv v10, v8, v9
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v9, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%a.sext = sext <2 x i16> %a to <2 x i32>
@@ -186,7 +186,7 @@ define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 ...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/169307