[llvm] [RISCV] Sink instructions so AVL dominates in RISCVVLOptimizer (PR #184155)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 3 21:15:46 PST 2026
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/184155
From cc157f77ec11852a8321e30e9a58a157224ff3fb Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 16 Feb 2026 17:31:14 +0800
Subject: [PATCH 1/5] Precommit tests
---
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 89 ++++++++++++++++++++++++++
1 file changed, 89 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 111266d2e988c..d340c70d47c35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -840,3 +840,92 @@ body: |
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
$v8 = COPY %y
...
+---
+name: vl_doesnt_dominate_sink
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: vl_doesnt_dominate_sink
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
+ %vl:gprnox0 = COPY $x1
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %y
+...
+---
+name: vl_doesnt_dominate_sink_load
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: vl_doesnt_dominate_sink_load
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 1)
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size)
+ %vl:gprnox0 = COPY $x1
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %y
+...
+---
+name: vl_doesnt_dominate_cant_sink_load
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: vl_doesnt_dominate_cant_sink_load
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 1)
+ ; CHECK-NEXT: PseudoVSE32_V_M1 $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size)
+ PseudoVSE32_V_M1 $noreg, $noreg, -1, 5 /* e32 */
+ %vl:gprnox0 = COPY $x1
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %y
+...
+---
+name: vl_doesnt_dominate_can_sink_vxsat
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: vl_doesnt_dominate_can_sink_vxsat
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */, implicit-def dead $vxsat
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */, implicit-def dead $vxsat
+ %vl:gprnox0 = COPY $x1
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %y
+...
+---
+name: vl_doesnt_dominate_cant_sink_vxsat
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: vl_doesnt_dominate_cant_sink_vxsat
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */, implicit-def dead $vxsat
+ ; CHECK-NEXT: $x2 = COPY $vxsat
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */, implicit-def dead $vxsat
+ $x2 = COPY $vxsat
+ %vl:gprnox0 = COPY $x1
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %y
+...
From 4f07fbefdb819c2a489c8899ddb9fafeed2ff395 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 16 Feb 2026 18:05:55 +0800
Subject: [PATCH 2/5] [RISCV] Sink instructions so AVL dominates in
RISCVVLOptimizer
The last feature RISCVVectorPeephole::tryToReduceVL has that RISCVVLOptimizer doesn't is sinking: if the VL we want to reduce to doesn't dominate a vector pseudo, the pseudo can be sunk below the VL's definition, provided it's safe to do so.
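As a rough sketch (mirroring the vl_doesnt_dominate_sink precommit test above), %vl is defined after %x, so %x's VL can't be reduced in place:

    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
    %vl:gprnox0 = COPY $x1
    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0

but after sinking %x below the COPY, its VL can be reduced to %vl:

    %vl:gprnox0 = COPY $x1
    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, %vl, 5 /* e32 */, 0
    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0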
This PR moves the logic that checks for physical register clobbering into RISCVInstrInfo so both passes can share it, but there may be a better place for it.
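For reference, the new call site in RISCVVLOptimizer::tryReduceVL (shown in full in the diff below) uses the shared helper roughly like this:

    // If the AVL's defining instruction VLMI doesn't dominate MI, try to
    // sink MI to just after VLMI instead of giving up, as long as every
    // user of MI's result would still be dominated and nothing is
    // clobbered in between.
    if (VLMI->getParent() == MI.getParent() &&
        all_of(MRI->use_instructions(MI.getOperand(0).getReg()), VLDominates) &&
        RISCVInstrInfo::isSafeToMove(MI, *VLMI->getNextNode()))
      MI.moveBefore(VLMI->getNextNode());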
The DemandedVLs DenseMap needs to be switched to a MapVector to get deterministic ordering, since the order in which we sink instructions can affect the resulting codegen.
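A minimal standalone illustration of the difference (hypothetical code, not from the patch): llvm::MapVector keeps a vector of keys alongside the map, so iteration follows insertion order, whereas DenseMap's iteration order depends on key hashing and table size:

    #include "llvm/ADT/MapVector.h"

    void iterationOrderDemo() {
      static const int A = 0, B = 0, C = 0;
      llvm::MapVector<const int *, unsigned> DemandedVLs;
      DemandedVLs[&A] = 1;
      DemandedVLs[&B] = 2;
      DemandedVLs[&C] = 3;
      // Always visits &A, &B, &C in insertion order, so any transform
      // driven by this loop is deterministic across runs. With a DenseMap
      // the visit order would depend on the pointer values.
      for (auto &[Key, VL] : DemandedVLs)
        (void)Key, (void)VL;
    }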
This helps remove a few vsetvli toggles in llvm-test-suite.
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 27 +
llvm/lib/Target/RISCV/RISCVInstrInfo.h | 5 +
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 20 +-
llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp | 30 +-
.../test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll | 8 +-
.../CodeGen/RISCV/GlobalISel/rvv/vfadd.ll | 12 +-
...regalloc-last-chance-recoloring-failure.ll | 6 +-
.../RISCV/rvv/access-fixed-objects-by-rvv.ll | 23 +-
.../RISCV/rvv/combine-reduce-add-to-vcpop.ll | 3 +-
llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll | 26 +-
llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll | 26 +-
llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll | 52 +-
llvm/test/CodeGen/RISCV/rvv/memory-args.ll | 7 +-
.../RISCV/rvv/nontemporal-vp-scalable.ll | 1430 +++++++++--------
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll | 80 +-
llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vector-splice.ll | 12 +-
llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll | 148 +-
llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll | 136 +-
llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll | 9 +-
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 46 +-
llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll | 20 +-
.../RISCV/rvv/vp-vector-interleaved-access.ll | 10 +-
.../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll | 26 +-
llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll | 57 +-
llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll | 20 +-
llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll | 85 +-
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll | 6 +-
llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll | 26 +-
llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll | 26 +-
llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll | 9 +-
36 files changed, 1289 insertions(+), 1226 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index fa388a3ec64d8..e7fc3aed6ea7e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -5334,3 +5334,30 @@ bool RISCVInstrInfo::requiresNTLHint(const MachineInstr &MI) const {
return true;
}
+
+bool RISCVInstrInfo::isSafeToMove(const MachineInstr &From,
+ const MachineInstr &To) {
+ assert(From.getParent() == To.getParent());
+ SmallVector<Register> PhysUses, PhysDefs;
+ for (const MachineOperand &MO : From.all_uses())
+ if (MO.getReg().isPhysical())
+ PhysUses.push_back(MO.getReg());
+ for (const MachineOperand &MO : From.all_defs())
+ if (MO.getReg().isPhysical())
+ PhysDefs.push_back(MO.getReg());
+ bool SawStore = false;
+ for (auto II = std::next(From.getIterator()); II != To.getIterator(); II++) {
+ for (Register PhysReg : PhysUses)
+ if (II->definesRegister(PhysReg, nullptr))
+ return false;
+ for (Register PhysReg : PhysDefs)
+ if (II->definesRegister(PhysReg, nullptr) ||
+ II->readsRegister(PhysReg, nullptr))
+ return false;
+ if (II->mayStore()) {
+ SawStore = true;
+ break;
+ }
+ }
+ return From.isSafeToMove(SawStore);
+}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 15d0e4d3e7d52..d3ad5bf0a0763 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -333,6 +333,11 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
/// Return true if the instruction requires an NTL hint to be emitted.
bool requiresNTLHint(const MachineInstr &MI) const;
+ /// Return true if moving \p From down to \p To won't cause any physical
+ /// register reads or writes to be clobbered, and won't affect any visible
+ /// side effects. \p From and \p To must be in the same block.
+ static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To);
+
/// Return true if the given load or store may be paired with another.
static bool isPairableLdStInstOpc(unsigned Opc);
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 99eaf2683e1cc..14f10f9b11128 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -92,7 +92,7 @@ class RISCVVLOptimizer : public MachineFunctionPass {
/// For a given instruction, records what elements of it are demanded by
/// downstream users.
- DenseMap<const MachineInstr *, DemandedVL> DemandedVLs;
+ MapVector<const MachineInstr *, DemandedVL> DemandedVLs;
SetVector<const MachineInstr *> Worklist;
/// \returns all vector virtual registers that \p MI uses.
@@ -1255,10 +1255,20 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI,
VLOp.ChangeToImmediate(CommonVL.getImm());
return true;
}
- const MachineInstr *VLMI = MRI->getVRegDef(CommonVL.getReg());
- if (!MDT->dominates(VLMI, &MI)) {
- LLVM_DEBUG(dbgs() << " Abort due to VL not dominating.\n");
- return false;
+ MachineInstr *VLMI = MRI->getVRegDef(CommonVL.getReg());
+ auto VLDominates = [this, &VLMI](MachineInstr &MI) {
+ return MDT->dominates(VLMI, &MI);
+ };
+ if (!VLDominates(MI)) {
+ assert(MI.getNumExplicitDefs() == 1);
+ if (VLMI->getParent() == MI.getParent() &&
+ all_of(MRI->use_instructions(MI.getOperand(0).getReg()), VLDominates) &&
+ RISCVInstrInfo::isSafeToMove(MI, *VLMI->getNextNode())) {
+ MI.moveBefore(VLMI->getNextNode());
+ } else {
+ LLVM_DEBUG(dbgs() << " Abort due to VL not dominating.\n");
+ return false;
+ }
}
LLVM_DEBUG(dbgs() << " Reduce VL from " << VLOp << " to "
<< printReg(CommonVL.getReg(), MRI->getTargetRegisterInfo())
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index c7c8eeed02aeb..07110c4b7f1ad 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -530,34 +530,6 @@ bool RISCVVectorPeephole::convertToUnmasked(MachineInstr &MI) const {
return true;
}
-/// Check if it's safe to move From down to To, checking that no physical
-/// registers are clobbered.
-static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To) {
- assert(From.getParent() == To.getParent());
- SmallVector<Register> PhysUses, PhysDefs;
- for (const MachineOperand &MO : From.all_uses())
- if (MO.getReg().isPhysical())
- PhysUses.push_back(MO.getReg());
- for (const MachineOperand &MO : From.all_defs())
- if (MO.getReg().isPhysical())
- PhysDefs.push_back(MO.getReg());
- bool SawStore = false;
- for (auto II = std::next(From.getIterator()); II != To.getIterator(); II++) {
- for (Register PhysReg : PhysUses)
- if (II->definesRegister(PhysReg, nullptr))
- return false;
- for (Register PhysReg : PhysDefs)
- if (II->definesRegister(PhysReg, nullptr) ||
- II->readsRegister(PhysReg, nullptr))
- return false;
- if (II->mayStore()) {
- SawStore = true;
- break;
- }
- }
- return From.isSafeToMove(SawStore);
-}
-
/// Given A and B are in the same MBB, returns true if A comes before B.
static bool dominates(MachineBasicBlock::const_iterator A,
MachineBasicBlock::const_iterator B) {
@@ -585,7 +557,7 @@ bool RISCVVectorPeephole::ensureDominates(const MachineOperand &MO,
MachineInstr *Def = MRI->getVRegDef(MO.getReg());
if (Def->getParent() == Src.getParent() && !dominates(Def, Src)) {
- if (!isSafeToMove(Src, *Def->getNextNode()))
+ if (!RISCVInstrInfo::isSafeToMove(Src, *Def->getNextNode()))
return false;
Src.moveBefore(Def->getNextNode());
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll
index c3cc472c4706f..e07548f45492a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll
@@ -221,8 +221,8 @@ entry:
define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
@@ -420,8 +420,8 @@ entry:
define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
@@ -586,8 +586,8 @@ entry:
define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
@@ -719,8 +719,8 @@ entry:
define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
index 1dd3a831903b5..d53c3609f88ef 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
@@ -216,9 +216,9 @@ entry:
define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
@@ -402,9 +402,9 @@ entry:
define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
@@ -551,9 +551,9 @@ entry:
define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index ae797de91b857..c399a1c3ca07f 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -24,8 +24,6 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb
; CHECK-NEXT: li a0, 55
-; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vloxseg2ei32.v v16, (a1), v8
; CHECK-NEXT: csrr a0, vlenb
@@ -35,6 +33,7 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; CHECK-NEXT: li s0, 36
; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
+; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: vfwadd.vv v16, v8, v12, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
@@ -76,8 +75,6 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: sub sp, sp, a0
; SUBREGLIVENESS-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb
; SUBREGLIVENESS-NEXT: li a0, 55
-; SUBREGLIVENESS-NEXT: vsetvli a2, zero, e8, m2, ta, ma
-; SUBREGLIVENESS-NEXT: vmclr.m v0
; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vloxseg2ei32.v v16, (a1), v8
; SUBREGLIVENESS-NEXT: csrr a0, vlenb
@@ -87,6 +84,7 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; SUBREGLIVENESS-NEXT: li s0, 36
; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
+; SUBREGLIVENESS-NEXT: vmclr.m v0
; SUBREGLIVENESS-NEXT: vfwadd.vv v16, v8, v12, v0.t
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
index 4aaaa88db8b6e..dfde5607eb542 100644
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -30,13 +30,13 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: sub sp, sp, a0
; RV64IV-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x04, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 528 + 1 * vlenb
-; RV64IV-NEXT: addi a0, sp, 8
-; RV64IV-NEXT: vl1re64.v v8, (a0)
-; RV64IV-NEXT: addi a0, sp, 528
-; RV64IV-NEXT: vl1re64.v v9, (a0)
; RV64IV-NEXT: ld a0, 520(sp)
+; RV64IV-NEXT: addi a1, sp, 8
+; RV64IV-NEXT: addi a2, sp, 528
; RV64IV-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV64IV-NEXT: vadd.vv v8, v8, v9
+; RV64IV-NEXT: vle64.v v8, (a2)
+; RV64IV-NEXT: vle64.v v9, (a1)
+; RV64IV-NEXT: vadd.vv v8, v9, v8
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: .cfi_def_cfa sp, 528
@@ -76,13 +76,14 @@ define <vscale x 1 x i64> @probe_fixed_and_vector_objects(ptr %val, <vscale x 1
; RV64IV-NEXT: .cfi_def_cfa_register sp
; RV64IV-NEXT: sub sp, sp, t1
; RV64IV-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x04, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 528 + 1 * vlenb
-; RV64IV-NEXT: addi a0, sp, 8
-; RV64IV-NEXT: vl1re64.v v9, (a0)
-; RV64IV-NEXT: addi a0, sp, 528
-; RV64IV-NEXT: vl1re64.v v10, (a0)
; RV64IV-NEXT: ld a0, 520(sp)
-; RV64IV-NEXT: vsetvli zero, a0, e64, m1, tu, ma
-; RV64IV-NEXT: vadd.vv v8, v9, v10
+; RV64IV-NEXT: addi a1, sp, 8
+; RV64IV-NEXT: addi a2, sp, 528
+; RV64IV-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64IV-NEXT: vle64.v v9, (a2)
+; RV64IV-NEXT: vle64.v v10, (a1)
+; RV64IV-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV64IV-NEXT: vadd.vv v8, v10, v9
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: .cfi_def_cfa sp, 528
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
index 96252f070a580..550869b10d3f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
@@ -191,11 +191,10 @@ define i32 @test_nxv1i1(<vscale x 1 x i1> %x) {
;
; ZVE-LABEL: test_nxv1i1:
; ZVE: # %bb.0: # %entry
-; ZVE-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; ZVE-NEXT: vmv.v.i v8, 0
; ZVE-NEXT: csrr a0, vlenb
; ZVE-NEXT: srli a0, a0, 3
; ZVE-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVE-NEXT: vmv.v.i v8, 0
; ZVE-NEXT: vmerge.vim v8, v8, 1, v0
; ZVE-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; ZVE-NEXT: vmv.s.x v9, zero
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index c432204b078f5..b95e138dfd3da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -1301,17 +1301,17 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: srli a4, a1, 3
-; CHECK-NEXT: vslidedown.vx v6, v0, a4
+; CHECK-NEXT: srli a3, a1, 3
; CHECK-NEXT: sub a4, a2, a1
-; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v24, (a3)
+; CHECK-NEXT: vslidedown.vx v6, v0, a3
; CHECK-NEXT: sltu a3, a2, a4
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a3, a3, a4
-; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: slli a4, a1, 3
+; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v24, (a4)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vmfeq.vv v8, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmv8r.v v8, v16
@@ -1377,14 +1377,14 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: sub a4, a2, a1
-; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v24, (a3)
-; CHECK-NEXT: sltu a3, a2, a4
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a4
+; CHECK-NEXT: sub a3, a2, a1
+; CHECK-NEXT: sltu a4, a2, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: slli a4, a1, 3
+; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v24, (a4)
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vl8re64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 27816a862cc1b..06c9cffa4eb1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -1301,17 +1301,17 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: srli a4, a1, 3
-; CHECK-NEXT: vslidedown.vx v6, v0, a4
+; CHECK-NEXT: srli a3, a1, 3
; CHECK-NEXT: sub a4, a2, a1
-; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v24, (a3)
+; CHECK-NEXT: vslidedown.vx v6, v0, a3
; CHECK-NEXT: sltu a3, a2, a4
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a3, a3, a4
-; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: slli a4, a1, 3
+; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v24, (a4)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vmfeq.vv v8, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmv8r.v v8, v16
@@ -1377,14 +1377,14 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: sub a4, a2, a1
-; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v24, (a3)
-; CHECK-NEXT: sltu a3, a2, a4
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a4
+; CHECK-NEXT: sub a3, a2, a1
+; CHECK-NEXT: sltu a4, a2, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: slli a4, a1, 3
+; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v24, (a4)
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: vmfeq.vv v7, v24, v24
; CHECK-NEXT: vl8re64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index 576753d1d7821..6303dccbdfb91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -940,7 +940,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
@@ -956,23 +956,25 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a3, a1, 3
; CHECK-NEXT: sub a5, a4, a1
-; CHECK-NEXT: add a6, a2, a3
-; CHECK-NEXT: vl8re64.v v8, (a6)
; CHECK-NEXT: sltu a6, a4, a5
; CHECK-NEXT: addi a6, a6, -1
; CHECK-NEXT: and a5, a6, a5
+; CHECK-NEXT: add a6, a2, a3
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v16, (a6)
; CHECK-NEXT: srli a6, a1, 3
; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v16, (a3)
+; CHECK-NEXT: vle64.v v8, (a3)
+; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a6
; CHECK-NEXT: li a3, 63
; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
-; CHECK-NEXT: vand.vx v24, v8, a3, v0.t
-; CHECK-NEXT: vsrl.vv v16, v16, v24, v0.t
+; CHECK-NEXT: vand.vx v24, v16, a3, v0.t
+; CHECK-NEXT: vsrl.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a5, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a5) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vnot.v v8, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v8, a3, v0.t
+; CHECK-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vnot.v v16, v16, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a3, v0.t
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -1007,18 +1009,18 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsrl.vv v16, v16, v8, v0.t
-; CHECK-NEXT: vnot.v v24, v24, v0.t
-; CHECK-NEXT: vand.vx v24, v24, a3, v0.t
+; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vnot.v v16, v24, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a3, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsll.vv v8, v8, v24, v0.t
-; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsll.vi v24, v24, 1, v0.t
+; CHECK-NEXT: vsll.vv v16, v24, v16, v0.t
+; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
@@ -1045,7 +1047,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
@@ -1054,14 +1056,16 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a5, a1, 3
+; CHECK-NEXT: sub a3, a4, a1
+; CHECK-NEXT: sltu a6, a4, a3
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a6, a6, a3
+; CHECK-NEXT: add a3, a2, a5
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a3)
; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: sub a6, a4, a1
+; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: add a3, a2, a5
-; CHECK-NEXT: vl8re64.v v8, (a3)
-; CHECK-NEXT: sltu a3, a4, a6
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a6, a3, a6
; CHECK-NEXT: li a3, 63
; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vand.vx v24, v8, a3, v0.t
@@ -1070,7 +1074,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: vs8r.v v16, (a6) # vscale x 64-byte Folded Spill
; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: vnot.v v8, v8, v0.t
-; CHECK-NEXT: vl8re64.v v16, (a5)
+; CHECK-NEXT: vle64.v v16, (a5)
; CHECK-NEXT: vand.vx v8, v8, a3, v0.t
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
index d5f8cc3b6ee93..8cdea77f658ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -5,9 +5,10 @@
define <vscale x 64 x i8> @callee(<vscale x 64 x i8> %arg0, <vscale x 64 x i8> %arg1, <vscale x 64 x i8> %arg2) {
; RV64IV-LABEL: callee:
; RV64IV: # %bb.0:
-; RV64IV-NEXT: vl8r.v v24, (a0)
-; RV64IV-NEXT: li a0, 1024
-; RV64IV-NEXT: vsetvli zero, a0, e8, m8, tu, ma
+; RV64IV-NEXT: li a1, 1024
+; RV64IV-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64IV-NEXT: vle8.v v24, (a0)
+; RV64IV-NEXT: vsetvli zero, zero, e8, m8, tu, ma
; RV64IV-NEXT: vmacc.vv v8, v16, v24
; RV64IV-NEXT: ret
%ret = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8(
diff --git a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
index 1ee7e138654b9..e84e456f5c2a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
@@ -29315,31 +29315,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_P1(<vscale x 32 x
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB850_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB850_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
-; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB850_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -29348,6 +29349,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_P1(<vscale x 32 x
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v25, (zero), v16
@@ -29355,8 +29358,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_P1(<vscale x 32 x
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB850_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -29399,31 +29400,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_P1(<vscale x 32 x
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB850_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB850_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
-; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB850_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -29432,6 +29434,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_P1(<vscale x 32 x
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v25, (zero), v16
@@ -29439,8 +29443,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_P1(<vscale x 32 x
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB850_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -29488,31 +29490,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_PALL(<vscale x 32
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB851_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB851_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
-; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB851_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -29521,6 +29524,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_PALL(<vscale x 32
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v25, (zero), v16
@@ -29528,8 +29533,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_PALL(<vscale x 32
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB851_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -29572,31 +29575,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_PALL(<vscale x 32
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB851_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB851_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
-; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB851_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -29605,6 +29609,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_PALL(<vscale x 32
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v25, (zero), v16
@@ -29612,8 +29618,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_PALL(<vscale x 32
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB851_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -29661,31 +29665,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_S1(<vscale x 32 x
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB852_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB852_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
-; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB852_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -29694,6 +29699,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_S1(<vscale x 32 x
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v25, (zero), v16
@@ -29701,8 +29708,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_S1(<vscale x 32 x
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB852_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -29745,31 +29750,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_S1(<vscale x 32 x
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB852_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB852_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
-; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB852_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -29778,6 +29784,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_S1(<vscale x 32 x
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v25, (zero), v16
@@ -29785,8 +29793,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_S1(<vscale x 32 x
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB852_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -29834,31 +29840,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_ALL(<vscale x 32 x
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB853_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB853_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
-; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB853_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -29867,6 +29874,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_ALL(<vscale x 32 x
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v25, (zero), v16
@@ -29874,8 +29883,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_ALL(<vscale x 32 x
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB853_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -29918,31 +29925,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_ALL(<vscale x 32 x
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB853_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB853_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
-; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB853_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -29951,6 +29959,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_ALL(<vscale x 32 x
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v25, (zero), v16
@@ -29958,8 +29968,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_ALL(<vscale x 32 x
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB853_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -30006,31 +30014,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_DEFAULT(<vscale x
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB854_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB854_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
-; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB854_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -30039,6 +30048,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_DEFAULT(<vscale x
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v25, (zero), v16
@@ -30046,8 +30057,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_DEFAULT(<vscale x
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB854_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -30090,31 +30099,32 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_DEFAULT(<vscale x
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB854_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB854_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v27, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
-; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB854_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -30123,6 +30133,8 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_DEFAULT(<vscale x
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v25, (zero), v16
@@ -30130,8 +30142,6 @@ define <vscale x 32 x i8> @test_nontemporal_vp_gather_nxv32i8_DEFAULT(<vscale x
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB854_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -32019,31 +32029,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_P1(<vscale x 32
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB880_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB880_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
-; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB880_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -32052,6 +32063,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_P1(<vscale x 32
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
@@ -32059,8 +32072,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_P1(<vscale x 32
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB880_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -32103,31 +32114,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_P1(<vscale x 32
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB880_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB880_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
-; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB880_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -32136,6 +32148,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_P1(<vscale x 32
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
@@ -32143,8 +32157,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_P1(<vscale x 32
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB880_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -32192,31 +32204,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_PALL(<vscale x 3
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB881_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB881_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
-; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB881_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -32225,6 +32238,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_PALL(<vscale x 3
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
@@ -32232,8 +32247,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_PALL(<vscale x 3
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB881_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -32276,31 +32289,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_PALL(<vscale x 3
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB881_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB881_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
-; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB881_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -32309,6 +32323,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_PALL(<vscale x 3
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
@@ -32316,8 +32332,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_PALL(<vscale x 3
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB881_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -32365,31 +32379,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_S1(<vscale x 32
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB882_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB882_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
-; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB882_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -32398,6 +32413,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_S1(<vscale x 32
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
@@ -32405,8 +32422,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_S1(<vscale x 32
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB882_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -32449,31 +32464,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_S1(<vscale x 32
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB882_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB882_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
-; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB882_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -32482,6 +32498,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_S1(<vscale x 32
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
@@ -32489,8 +32507,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_S1(<vscale x 32
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB882_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -32538,31 +32554,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_ALL(<vscale x 32
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB883_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB883_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
-; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB883_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -32571,6 +32588,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_ALL(<vscale x 32
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
@@ -32578,8 +32597,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_ALL(<vscale x 32
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB883_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -32622,31 +32639,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_ALL(<vscale x 32
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB883_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB883_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
-; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB883_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -32655,6 +32673,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_ALL(<vscale x 32
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
@@ -32662,8 +32682,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_ALL(<vscale x 32
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB883_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -32710,31 +32728,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_DEFAULT(<vscale
; CHECK-RV64V-NEXT: sub sp, sp, a1
; CHECK-RV64V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64V-NEXT: addi a1, sp, 16
-; CHECK-RV64V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64V-NEXT: csrr a1, vlenb
-; CHECK-RV64V-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64V-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64V-NEXT: slli a4, a1, 3
; CHECK-RV64V-NEXT: slli a3, a1, 1
-; CHECK-RV64V-NEXT: add a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a2, a3
-; CHECK-RV64V-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64V-NEXT: sltu a0, a2, a4
-; CHECK-RV64V-NEXT: addi a0, a0, -1
-; CHECK-RV64V-NEXT: and a0, a0, a4
-; CHECK-RV64V-NEXT: sub a4, a0, a1
-; CHECK-RV64V-NEXT: sltu a5, a0, a4
+; CHECK-RV64V-NEXT: add a4, a0, a4
+; CHECK-RV64V-NEXT: sub a0, a2, a3
+; CHECK-RV64V-NEXT: sltu a5, a2, a0
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: and a0, a5, a0
+; CHECK-RV64V-NEXT: sub a5, a0, a1
+; CHECK-RV64V-NEXT: sltu a6, a0, a5
+; CHECK-RV64V-NEXT: addi a6, a6, -1
+; CHECK-RV64V-NEXT: and a5, a6, a5
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v0, (a4)
; CHECK-RV64V-NEXT: bltu a0, a1, .LBB884_2
; CHECK-RV64V-NEXT: # %bb.1:
; CHECK-RV64V-NEXT: mv a0, a1
; CHECK-RV64V-NEXT: .LBB884_2:
-; CHECK-RV64V-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
-; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64V-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64V-NEXT: bltu a2, a3, .LBB884_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a2, a3
@@ -32743,6 +32762,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_DEFAULT(<vscale
; CHECK-RV64V-NEXT: sltu a3, a2, a0
; CHECK-RV64V-NEXT: addi a3, a3, -1
; CHECK-RV64V-NEXT: and a0, a3, a0
+; CHECK-RV64V-NEXT: addi a3, sp, 16
+; CHECK-RV64V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v26, (zero), v16
@@ -32750,8 +32771,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_DEFAULT(<vscale
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a2, a1
; CHECK-RV64V-NEXT: .LBB884_6:
-; CHECK-RV64V-NEXT: addi a0, sp, 16
-; CHECK-RV64V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v24, (zero), v8
@@ -32794,31 +32813,32 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_DEFAULT(<vscale
; CHECK-RV64VC-NEXT: sub sp, sp, a1
; CHECK-RV64VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV64VC-NEXT: addi a1, sp, 16
-; CHECK-RV64VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV64VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV64VC-NEXT: csrr a1, vlenb
-; CHECK-RV64VC-NEXT: vl8re64.v v8, (a0)
+; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
; CHECK-RV64VC-NEXT: slli a4, a1, 3
; CHECK-RV64VC-NEXT: slli a3, a1, 1
-; CHECK-RV64VC-NEXT: add a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a2, a3
-; CHECK-RV64VC-NEXT: vl8re64.v v0, (a0)
-; CHECK-RV64VC-NEXT: sltu a0, a2, a4
-; CHECK-RV64VC-NEXT: addi a0, a0, -1
-; CHECK-RV64VC-NEXT: and a0, a0, a4
-; CHECK-RV64VC-NEXT: sub a4, a0, a1
-; CHECK-RV64VC-NEXT: sltu a5, a0, a4
+; CHECK-RV64VC-NEXT: add a6, a0, a4
+; CHECK-RV64VC-NEXT: sub a0, a2, a3
+; CHECK-RV64VC-NEXT: sltu a5, a2, a0
; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a0, a0, a5
+; CHECK-RV64VC-NEXT: sub a5, a0, a1
+; CHECK-RV64VC-NEXT: sltu a4, a0, a5
+; CHECK-RV64VC-NEXT: addi a4, a4, -1
; CHECK-RV64VC-NEXT: and a4, a4, a5
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v0, (a6)
; CHECK-RV64VC-NEXT: bltu a0, a1, .LBB884_2
; CHECK-RV64VC-NEXT: # %bb.1:
; CHECK-RV64VC-NEXT: mv a0, a1
; CHECK-RV64VC-NEXT: .LBB884_2:
-; CHECK-RV64VC-NEXT: vsetvli zero, a4, e16, m2, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v30, (zero), v0
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
-; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v8
+; CHECK-RV64VC-NEXT: vluxei64.v v28, (zero), v16
; CHECK-RV64VC-NEXT: bltu a2, a3, .LBB884_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a2, a3
@@ -32827,6 +32847,8 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_DEFAULT(<vscale
; CHECK-RV64VC-NEXT: sltu a3, a2, a0
; CHECK-RV64VC-NEXT: addi a3, a3, -1
; CHECK-RV64VC-NEXT: and a0, a0, a3
+; CHECK-RV64VC-NEXT: addi a3, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v26, (zero), v16
@@ -32834,8 +32856,6 @@ define <vscale x 32 x i16> @test_nontemporal_vp_gather_nxv32i16_DEFAULT(<vscale
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a2, a1
; CHECK-RV64VC-NEXT: .LBB884_6:
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v24, (zero), v8
@@ -34535,51 +34555,51 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV64V-NEXT: sub a4, a3, a1
; CHECK-RV64V-NEXT: sltu a5, a3, a4
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a6, a5, a4
-; CHECK-RV64V-NEXT: sub a4, a6, s1
-; CHECK-RV64V-NEXT: mv a5, a6
-; CHECK-RV64V-NEXT: bltu a6, s1, .LBB910_2
+; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: sub a5, a4, s1
+; CHECK-RV64V-NEXT: sltu a7, a4, a5
+; CHECK-RV64V-NEXT: bltu a4, s1, .LBB910_2
; CHECK-RV64V-NEXT: # %bb.1:
-; CHECK-RV64V-NEXT: mv a5, s1
+; CHECK-RV64V-NEXT: mv a4, s1
; CHECK-RV64V-NEXT: .LBB910_2:
-; CHECK-RV64V-NEXT: sltu a7, a6, a4
+; CHECK-RV64V-NEXT: addi a7, a7, -1
; CHECK-RV64V-NEXT: bltu a3, a1, .LBB910_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a3, a1
; CHECK-RV64V-NEXT: .LBB910_4:
; CHECK-RV64V-NEXT: add a6, s2, a0
-; CHECK-RV64V-NEXT: addi a0, a7, -1
-; CHECK-RV64V-NEXT: sub a7, a3, s1
-; CHECK-RV64V-NEXT: sltu t0, a3, a7
-; CHECK-RV64V-NEXT: addi t0, t0, -1
-; CHECK-RV64V-NEXT: and a7, t0, a7
+; CHECK-RV64V-NEXT: and a0, a7, a5
+; CHECK-RV64V-NEXT: sub a5, a3, s1
+; CHECK-RV64V-NEXT: sltu a7, a3, a5
+; CHECK-RV64V-NEXT: addi a7, a7, -1
+; CHECK-RV64V-NEXT: and a5, a7, a5
; CHECK-RV64V-NEXT: bltu a3, s1, .LBB910_6
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a3, s1
; CHECK-RV64V-NEXT: .LBB910_6:
-; CHECK-RV64V-NEXT: vl8re64.v v16, (a6)
+; CHECK-RV64V-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v16, (a6)
; CHECK-RV64V-NEXT: addi a6, sp, 16
; CHECK-RV64V-NEXT: vl8r.v v24, (a6) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 3
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a7, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 3
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 4
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 4
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.p1
; CHECK-RV64V-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64V-NEXT: and a0, a0, a4
; CHECK-RV64V-NEXT: bltu s0, a2, .LBB910_8
; CHECK-RV64V-NEXT: # %bb.7:
; CHECK-RV64V-NEXT: mv s0, a2
@@ -34673,32 +34693,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV32V-NEXT: sub sp, sp, a1
; CHECK-RV32V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32V-NEXT: addi a1, sp, 16
-; CHECK-RV32V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32V-NEXT: csrr a4, vlenb
-; CHECK-RV32V-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32V-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32V-NEXT: slli a3, a4, 3
; CHECK-RV32V-NEXT: slli a1, a4, 2
-; CHECK-RV32V-NEXT: add a0, a0, a3
-; CHECK-RV32V-NEXT: sub a3, a2, a1
-; CHECK-RV32V-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32V-NEXT: sltu a0, a2, a3
-; CHECK-RV32V-NEXT: addi a0, a0, -1
-; CHECK-RV32V-NEXT: and a3, a0, a3
+; CHECK-RV32V-NEXT: add a5, a0, a3
+; CHECK-RV32V-NEXT: sub a0, a2, a1
+; CHECK-RV32V-NEXT: sltu a3, a2, a0
+; CHECK-RV32V-NEXT: addi a3, a3, -1
+; CHECK-RV32V-NEXT: and a3, a3, a0
; CHECK-RV32V-NEXT: slli a0, a4, 1
; CHECK-RV32V-NEXT: sub a4, a3, a0
-; CHECK-RV32V-NEXT: sltu a5, a3, a4
-; CHECK-RV32V-NEXT: addi a5, a5, -1
-; CHECK-RV32V-NEXT: and a4, a5, a4
+; CHECK-RV32V-NEXT: sltu a6, a3, a4
+; CHECK-RV32V-NEXT: addi a6, a6, -1
+; CHECK-RV32V-NEXT: and a4, a6, a4
+; CHECK-RV32V-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32V-NEXT: vle32.v v0, (a5)
; CHECK-RV32V-NEXT: bltu a3, a0, .LBB910_2
; CHECK-RV32V-NEXT: # %bb.1:
; CHECK-RV32V-NEXT: mv a3, a0
; CHECK-RV32V-NEXT: .LBB910_2:
-; CHECK-RV32V-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32V-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.p1
; CHECK-RV32V-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32V-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.p1
-; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32V-NEXT: bltu a2, a1, .LBB910_4
; CHECK-RV32V-NEXT: # %bb.3:
; CHECK-RV32V-NEXT: mv a2, a1
@@ -34707,6 +34728,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV32V-NEXT: sltu a3, a2, a1
; CHECK-RV32V-NEXT: addi a3, a3, -1
; CHECK-RV32V-NEXT: and a1, a3, a1
+; CHECK-RV32V-NEXT: addi a3, sp, 16
+; CHECK-RV32V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.p1
; CHECK-RV32V-NEXT: vluxei32.v v26, (zero), v16
@@ -34714,8 +34737,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV32V-NEXT: # %bb.5:
; CHECK-RV32V-NEXT: mv a2, a0
; CHECK-RV32V-NEXT: .LBB910_6:
-; CHECK-RV32V-NEXT: addi a0, sp, 16
-; CHECK-RV32V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.p1
; CHECK-RV32V-NEXT: vluxei32.v v24, (zero), v8
@@ -34800,8 +34821,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV64VC-NEXT: li a1, 40
; CHECK-RV64VC-NEXT: mv a0, s1
; CHECK-RV64VC-NEXT: call __muldi3
-; CHECK-RV64VC-NEXT: slli a7, s1, 2
-; CHECK-RV64VC-NEXT: sub a1, s0, a7
+; CHECK-RV64VC-NEXT: slli a6, s1, 2
+; CHECK-RV64VC-NEXT: sub a1, s0, a6
; CHECK-RV64VC-NEXT: sltu a2, s0, a1
; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: and a3, a2, a1
@@ -34809,54 +34830,54 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV64VC-NEXT: sub a2, a3, a1
; CHECK-RV64VC-NEXT: sltu a4, a3, a2
; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
-; CHECK-RV64VC-NEXT: sub t0, a2, s1
-; CHECK-RV64VC-NEXT: mv a5, a2
-; CHECK-RV64VC-NEXT: bltu a2, s1, .LBB910_2
+; CHECK-RV64VC-NEXT: and a4, a4, a2
+; CHECK-RV64VC-NEXT: sub a5, a4, s1
+; CHECK-RV64VC-NEXT: sltu a2, a4, a5
+; CHECK-RV64VC-NEXT: bltu a4, s1, .LBB910_2
; CHECK-RV64VC-NEXT: # %bb.1:
-; CHECK-RV64VC-NEXT: mv a5, s1
+; CHECK-RV64VC-NEXT: mv a4, s1
; CHECK-RV64VC-NEXT: .LBB910_2:
-; CHECK-RV64VC-NEXT: sltu a6, a2, t0
+; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: bltu a3, a1, .LBB910_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a3, a1
; CHECK-RV64VC-NEXT: .LBB910_4:
-; CHECK-RV64VC-NEXT: add a0, a0, s2
-; CHECK-RV64VC-NEXT: addi a6, a6, -1
+; CHECK-RV64VC-NEXT: add a7, s2, a0
+; CHECK-RV64VC-NEXT: and a0, a2, a5
; CHECK-RV64VC-NEXT: sub a2, a3, s1
-; CHECK-RV64VC-NEXT: sltu a4, a3, a2
-; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
+; CHECK-RV64VC-NEXT: sltu a5, a3, a2
+; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a5, a5, a2
; CHECK-RV64VC-NEXT: bltu a3, s1, .LBB910_6
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a3, s1
; CHECK-RV64VC-NEXT: .LBB910_6:
-; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v16, (a7)
+; CHECK-RV64VC-NEXT: addi a2, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 3
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 3
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 4
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 4
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
; CHECK-RV64VC-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64VC-NEXT: and a0, a6, t0
-; CHECK-RV64VC-NEXT: bltu s0, a7, .LBB910_8
+; CHECK-RV64VC-NEXT: bltu s0, a6, .LBB910_8
; CHECK-RV64VC-NEXT: # %bb.7:
-; CHECK-RV64VC-NEXT: mv s0, a7
+; CHECK-RV64VC-NEXT: mv s0, a6
; CHECK-RV64VC-NEXT: .LBB910_8:
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.p1
@@ -34947,32 +34968,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV32VC-NEXT: sub sp, sp, a1
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32VC-NEXT: addi a1, sp, 16
-; CHECK-RV32VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: csrr a4, vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32VC-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32VC-NEXT: slli a3, a4, 3
; CHECK-RV32VC-NEXT: slli a1, a4, 2
-; CHECK-RV32VC-NEXT: add a0, a0, a3
-; CHECK-RV32VC-NEXT: sub a3, a2, a1
-; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: sltu a0, a2, a3
-; CHECK-RV32VC-NEXT: addi a0, a0, -1
+; CHECK-RV32VC-NEXT: add a6, a0, a3
+; CHECK-RV32VC-NEXT: sub a0, a2, a1
+; CHECK-RV32VC-NEXT: sltu a3, a2, a0
+; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a3, a3, a0
; CHECK-RV32VC-NEXT: slli a0, a4, 1
; CHECK-RV32VC-NEXT: sub a4, a3, a0
; CHECK-RV32VC-NEXT: sltu a5, a3, a4
; CHECK-RV32VC-NEXT: addi a5, a5, -1
; CHECK-RV32VC-NEXT: and a4, a4, a5
+; CHECK-RV32VC-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32VC-NEXT: vle32.v v0, (a6)
; CHECK-RV32VC-NEXT: bltu a3, a0, .LBB910_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a3, a0
; CHECK-RV32VC-NEXT: .LBB910_2:
-; CHECK-RV32VC-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32VC-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.p1
; CHECK-RV32VC-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32VC-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.p1
-; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32VC-NEXT: bltu a2, a1, .LBB910_4
; CHECK-RV32VC-NEXT: # %bb.3:
; CHECK-RV32VC-NEXT: mv a2, a1
@@ -34981,6 +35003,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV32VC-NEXT: sltu a3, a2, a1
; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a1, a1, a3
+; CHECK-RV32VC-NEXT: addi a3, sp, 16
+; CHECK-RV32VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.p1
; CHECK-RV32VC-NEXT: vluxei32.v v26, (zero), v16
@@ -34988,8 +35012,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_P1(<vscale x 64 x
; CHECK-RV32VC-NEXT: # %bb.5:
; CHECK-RV32VC-NEXT: mv a2, a0
; CHECK-RV32VC-NEXT: .LBB910_6:
-; CHECK-RV32VC-NEXT: addi a0, sp, 16
-; CHECK-RV32VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.p1
; CHECK-RV32VC-NEXT: vluxei32.v v24, (zero), v8
@@ -35088,51 +35110,51 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV64V-NEXT: sub a4, a3, a1
; CHECK-RV64V-NEXT: sltu a5, a3, a4
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a6, a5, a4
-; CHECK-RV64V-NEXT: sub a4, a6, s1
-; CHECK-RV64V-NEXT: mv a5, a6
-; CHECK-RV64V-NEXT: bltu a6, s1, .LBB911_2
+; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: sub a5, a4, s1
+; CHECK-RV64V-NEXT: sltu a7, a4, a5
+; CHECK-RV64V-NEXT: bltu a4, s1, .LBB911_2
; CHECK-RV64V-NEXT: # %bb.1:
-; CHECK-RV64V-NEXT: mv a5, s1
+; CHECK-RV64V-NEXT: mv a4, s1
; CHECK-RV64V-NEXT: .LBB911_2:
-; CHECK-RV64V-NEXT: sltu a7, a6, a4
+; CHECK-RV64V-NEXT: addi a7, a7, -1
; CHECK-RV64V-NEXT: bltu a3, a1, .LBB911_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a3, a1
; CHECK-RV64V-NEXT: .LBB911_4:
; CHECK-RV64V-NEXT: add a6, s2, a0
-; CHECK-RV64V-NEXT: addi a0, a7, -1
-; CHECK-RV64V-NEXT: sub a7, a3, s1
-; CHECK-RV64V-NEXT: sltu t0, a3, a7
-; CHECK-RV64V-NEXT: addi t0, t0, -1
-; CHECK-RV64V-NEXT: and a7, t0, a7
+; CHECK-RV64V-NEXT: and a0, a7, a5
+; CHECK-RV64V-NEXT: sub a5, a3, s1
+; CHECK-RV64V-NEXT: sltu a7, a3, a5
+; CHECK-RV64V-NEXT: addi a7, a7, -1
+; CHECK-RV64V-NEXT: and a5, a7, a5
; CHECK-RV64V-NEXT: bltu a3, s1, .LBB911_6
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a3, s1
; CHECK-RV64V-NEXT: .LBB911_6:
-; CHECK-RV64V-NEXT: vl8re64.v v16, (a6)
+; CHECK-RV64V-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v16, (a6)
; CHECK-RV64V-NEXT: addi a6, sp, 16
; CHECK-RV64V-NEXT: vl8r.v v24, (a6) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 3
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a7, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 3
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 4
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 4
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.pall
; CHECK-RV64V-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64V-NEXT: and a0, a0, a4
; CHECK-RV64V-NEXT: bltu s0, a2, .LBB911_8
; CHECK-RV64V-NEXT: # %bb.7:
; CHECK-RV64V-NEXT: mv s0, a2
@@ -35226,32 +35248,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV32V-NEXT: sub sp, sp, a1
; CHECK-RV32V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32V-NEXT: addi a1, sp, 16
-; CHECK-RV32V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32V-NEXT: csrr a4, vlenb
-; CHECK-RV32V-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32V-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32V-NEXT: slli a3, a4, 3
; CHECK-RV32V-NEXT: slli a1, a4, 2
-; CHECK-RV32V-NEXT: add a0, a0, a3
-; CHECK-RV32V-NEXT: sub a3, a2, a1
-; CHECK-RV32V-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32V-NEXT: sltu a0, a2, a3
-; CHECK-RV32V-NEXT: addi a0, a0, -1
-; CHECK-RV32V-NEXT: and a3, a0, a3
+; CHECK-RV32V-NEXT: add a5, a0, a3
+; CHECK-RV32V-NEXT: sub a0, a2, a1
+; CHECK-RV32V-NEXT: sltu a3, a2, a0
+; CHECK-RV32V-NEXT: addi a3, a3, -1
+; CHECK-RV32V-NEXT: and a3, a3, a0
; CHECK-RV32V-NEXT: slli a0, a4, 1
; CHECK-RV32V-NEXT: sub a4, a3, a0
-; CHECK-RV32V-NEXT: sltu a5, a3, a4
-; CHECK-RV32V-NEXT: addi a5, a5, -1
-; CHECK-RV32V-NEXT: and a4, a5, a4
+; CHECK-RV32V-NEXT: sltu a6, a3, a4
+; CHECK-RV32V-NEXT: addi a6, a6, -1
+; CHECK-RV32V-NEXT: and a4, a6, a4
+; CHECK-RV32V-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32V-NEXT: vle32.v v0, (a5)
; CHECK-RV32V-NEXT: bltu a3, a0, .LBB911_2
; CHECK-RV32V-NEXT: # %bb.1:
; CHECK-RV32V-NEXT: mv a3, a0
; CHECK-RV32V-NEXT: .LBB911_2:
-; CHECK-RV32V-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32V-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.pall
; CHECK-RV32V-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32V-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.pall
-; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32V-NEXT: bltu a2, a1, .LBB911_4
; CHECK-RV32V-NEXT: # %bb.3:
; CHECK-RV32V-NEXT: mv a2, a1
@@ -35260,6 +35283,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV32V-NEXT: sltu a3, a2, a1
; CHECK-RV32V-NEXT: addi a3, a3, -1
; CHECK-RV32V-NEXT: and a1, a3, a1
+; CHECK-RV32V-NEXT: addi a3, sp, 16
+; CHECK-RV32V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.pall
; CHECK-RV32V-NEXT: vluxei32.v v26, (zero), v16
@@ -35267,8 +35292,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV32V-NEXT: # %bb.5:
; CHECK-RV32V-NEXT: mv a2, a0
; CHECK-RV32V-NEXT: .LBB911_6:
-; CHECK-RV32V-NEXT: addi a0, sp, 16
-; CHECK-RV32V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.pall
; CHECK-RV32V-NEXT: vluxei32.v v24, (zero), v8
@@ -35353,8 +35376,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV64VC-NEXT: li a1, 40
; CHECK-RV64VC-NEXT: mv a0, s1
; CHECK-RV64VC-NEXT: call __muldi3
-; CHECK-RV64VC-NEXT: slli a7, s1, 2
-; CHECK-RV64VC-NEXT: sub a1, s0, a7
+; CHECK-RV64VC-NEXT: slli a6, s1, 2
+; CHECK-RV64VC-NEXT: sub a1, s0, a6
; CHECK-RV64VC-NEXT: sltu a2, s0, a1
; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: and a3, a2, a1
@@ -35362,54 +35385,54 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV64VC-NEXT: sub a2, a3, a1
; CHECK-RV64VC-NEXT: sltu a4, a3, a2
; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
-; CHECK-RV64VC-NEXT: sub t0, a2, s1
-; CHECK-RV64VC-NEXT: mv a5, a2
-; CHECK-RV64VC-NEXT: bltu a2, s1, .LBB911_2
+; CHECK-RV64VC-NEXT: and a4, a4, a2
+; CHECK-RV64VC-NEXT: sub a5, a4, s1
+; CHECK-RV64VC-NEXT: sltu a2, a4, a5
+; CHECK-RV64VC-NEXT: bltu a4, s1, .LBB911_2
; CHECK-RV64VC-NEXT: # %bb.1:
-; CHECK-RV64VC-NEXT: mv a5, s1
+; CHECK-RV64VC-NEXT: mv a4, s1
; CHECK-RV64VC-NEXT: .LBB911_2:
-; CHECK-RV64VC-NEXT: sltu a6, a2, t0
+; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: bltu a3, a1, .LBB911_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a3, a1
; CHECK-RV64VC-NEXT: .LBB911_4:
-; CHECK-RV64VC-NEXT: add a0, a0, s2
-; CHECK-RV64VC-NEXT: addi a6, a6, -1
+; CHECK-RV64VC-NEXT: add a7, s2, a0
+; CHECK-RV64VC-NEXT: and a0, a2, a5
; CHECK-RV64VC-NEXT: sub a2, a3, s1
-; CHECK-RV64VC-NEXT: sltu a4, a3, a2
-; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
+; CHECK-RV64VC-NEXT: sltu a5, a3, a2
+; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a5, a5, a2
; CHECK-RV64VC-NEXT: bltu a3, s1, .LBB911_6
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a3, s1
; CHECK-RV64VC-NEXT: .LBB911_6:
-; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v16, (a7)
+; CHECK-RV64VC-NEXT: addi a2, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 3
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 3
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 4
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 4
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
; CHECK-RV64VC-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64VC-NEXT: and a0, a6, t0
-; CHECK-RV64VC-NEXT: bltu s0, a7, .LBB911_8
+; CHECK-RV64VC-NEXT: bltu s0, a6, .LBB911_8
; CHECK-RV64VC-NEXT: # %bb.7:
-; CHECK-RV64VC-NEXT: mv s0, a7
+; CHECK-RV64VC-NEXT: mv s0, a6
; CHECK-RV64VC-NEXT: .LBB911_8:
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.pall
@@ -35500,32 +35523,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV32VC-NEXT: sub sp, sp, a1
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32VC-NEXT: addi a1, sp, 16
-; CHECK-RV32VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: csrr a4, vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32VC-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32VC-NEXT: slli a3, a4, 3
; CHECK-RV32VC-NEXT: slli a1, a4, 2
-; CHECK-RV32VC-NEXT: add a0, a0, a3
-; CHECK-RV32VC-NEXT: sub a3, a2, a1
-; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: sltu a0, a2, a3
-; CHECK-RV32VC-NEXT: addi a0, a0, -1
+; CHECK-RV32VC-NEXT: add a6, a0, a3
+; CHECK-RV32VC-NEXT: sub a0, a2, a1
+; CHECK-RV32VC-NEXT: sltu a3, a2, a0
+; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a3, a3, a0
; CHECK-RV32VC-NEXT: slli a0, a4, 1
; CHECK-RV32VC-NEXT: sub a4, a3, a0
; CHECK-RV32VC-NEXT: sltu a5, a3, a4
; CHECK-RV32VC-NEXT: addi a5, a5, -1
; CHECK-RV32VC-NEXT: and a4, a4, a5
+; CHECK-RV32VC-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32VC-NEXT: vle32.v v0, (a6)
; CHECK-RV32VC-NEXT: bltu a3, a0, .LBB911_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a3, a0
; CHECK-RV32VC-NEXT: .LBB911_2:
-; CHECK-RV32VC-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32VC-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.pall
; CHECK-RV32VC-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32VC-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.pall
-; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32VC-NEXT: bltu a2, a1, .LBB911_4
; CHECK-RV32VC-NEXT: # %bb.3:
; CHECK-RV32VC-NEXT: mv a2, a1
@@ -35534,6 +35558,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV32VC-NEXT: sltu a3, a2, a1
; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a1, a1, a3
+; CHECK-RV32VC-NEXT: addi a3, sp, 16
+; CHECK-RV32VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.pall
; CHECK-RV32VC-NEXT: vluxei32.v v26, (zero), v16
@@ -35541,8 +35567,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_PALL(<vscale x 64
; CHECK-RV32VC-NEXT: # %bb.5:
; CHECK-RV32VC-NEXT: mv a2, a0
; CHECK-RV32VC-NEXT: .LBB911_6:
-; CHECK-RV32VC-NEXT: addi a0, sp, 16
-; CHECK-RV32VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.pall
; CHECK-RV32VC-NEXT: vluxei32.v v24, (zero), v8
@@ -35641,51 +35665,51 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV64V-NEXT: sub a4, a3, a1
; CHECK-RV64V-NEXT: sltu a5, a3, a4
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a6, a5, a4
-; CHECK-RV64V-NEXT: sub a4, a6, s1
-; CHECK-RV64V-NEXT: mv a5, a6
-; CHECK-RV64V-NEXT: bltu a6, s1, .LBB912_2
+; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: sub a5, a4, s1
+; CHECK-RV64V-NEXT: sltu a7, a4, a5
+; CHECK-RV64V-NEXT: bltu a4, s1, .LBB912_2
; CHECK-RV64V-NEXT: # %bb.1:
-; CHECK-RV64V-NEXT: mv a5, s1
+; CHECK-RV64V-NEXT: mv a4, s1
; CHECK-RV64V-NEXT: .LBB912_2:
-; CHECK-RV64V-NEXT: sltu a7, a6, a4
+; CHECK-RV64V-NEXT: addi a7, a7, -1
; CHECK-RV64V-NEXT: bltu a3, a1, .LBB912_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a3, a1
; CHECK-RV64V-NEXT: .LBB912_4:
; CHECK-RV64V-NEXT: add a6, s2, a0
-; CHECK-RV64V-NEXT: addi a0, a7, -1
-; CHECK-RV64V-NEXT: sub a7, a3, s1
-; CHECK-RV64V-NEXT: sltu t0, a3, a7
-; CHECK-RV64V-NEXT: addi t0, t0, -1
-; CHECK-RV64V-NEXT: and a7, t0, a7
+; CHECK-RV64V-NEXT: and a0, a7, a5
+; CHECK-RV64V-NEXT: sub a5, a3, s1
+; CHECK-RV64V-NEXT: sltu a7, a3, a5
+; CHECK-RV64V-NEXT: addi a7, a7, -1
+; CHECK-RV64V-NEXT: and a5, a7, a5
; CHECK-RV64V-NEXT: bltu a3, s1, .LBB912_6
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a3, s1
; CHECK-RV64V-NEXT: .LBB912_6:
-; CHECK-RV64V-NEXT: vl8re64.v v16, (a6)
+; CHECK-RV64V-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v16, (a6)
; CHECK-RV64V-NEXT: addi a6, sp, 16
; CHECK-RV64V-NEXT: vl8r.v v24, (a6) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 3
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a7, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 3
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 4
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 4
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.s1
; CHECK-RV64V-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64V-NEXT: and a0, a0, a4
; CHECK-RV64V-NEXT: bltu s0, a2, .LBB912_8
; CHECK-RV64V-NEXT: # %bb.7:
; CHECK-RV64V-NEXT: mv s0, a2
@@ -35779,32 +35803,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV32V-NEXT: sub sp, sp, a1
; CHECK-RV32V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32V-NEXT: addi a1, sp, 16
-; CHECK-RV32V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32V-NEXT: csrr a4, vlenb
-; CHECK-RV32V-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32V-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32V-NEXT: slli a3, a4, 3
; CHECK-RV32V-NEXT: slli a1, a4, 2
-; CHECK-RV32V-NEXT: add a0, a0, a3
-; CHECK-RV32V-NEXT: sub a3, a2, a1
-; CHECK-RV32V-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32V-NEXT: sltu a0, a2, a3
-; CHECK-RV32V-NEXT: addi a0, a0, -1
-; CHECK-RV32V-NEXT: and a3, a0, a3
+; CHECK-RV32V-NEXT: add a5, a0, a3
+; CHECK-RV32V-NEXT: sub a0, a2, a1
+; CHECK-RV32V-NEXT: sltu a3, a2, a0
+; CHECK-RV32V-NEXT: addi a3, a3, -1
+; CHECK-RV32V-NEXT: and a3, a3, a0
; CHECK-RV32V-NEXT: slli a0, a4, 1
; CHECK-RV32V-NEXT: sub a4, a3, a0
-; CHECK-RV32V-NEXT: sltu a5, a3, a4
-; CHECK-RV32V-NEXT: addi a5, a5, -1
-; CHECK-RV32V-NEXT: and a4, a5, a4
+; CHECK-RV32V-NEXT: sltu a6, a3, a4
+; CHECK-RV32V-NEXT: addi a6, a6, -1
+; CHECK-RV32V-NEXT: and a4, a6, a4
+; CHECK-RV32V-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32V-NEXT: vle32.v v0, (a5)
; CHECK-RV32V-NEXT: bltu a3, a0, .LBB912_2
; CHECK-RV32V-NEXT: # %bb.1:
; CHECK-RV32V-NEXT: mv a3, a0
; CHECK-RV32V-NEXT: .LBB912_2:
-; CHECK-RV32V-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32V-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.s1
; CHECK-RV32V-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32V-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.s1
-; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32V-NEXT: bltu a2, a1, .LBB912_4
; CHECK-RV32V-NEXT: # %bb.3:
; CHECK-RV32V-NEXT: mv a2, a1
@@ -35813,6 +35838,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV32V-NEXT: sltu a3, a2, a1
; CHECK-RV32V-NEXT: addi a3, a3, -1
; CHECK-RV32V-NEXT: and a1, a3, a1
+; CHECK-RV32V-NEXT: addi a3, sp, 16
+; CHECK-RV32V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.s1
; CHECK-RV32V-NEXT: vluxei32.v v26, (zero), v16
@@ -35820,8 +35847,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV32V-NEXT: # %bb.5:
; CHECK-RV32V-NEXT: mv a2, a0
; CHECK-RV32V-NEXT: .LBB912_6:
-; CHECK-RV32V-NEXT: addi a0, sp, 16
-; CHECK-RV32V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.s1
; CHECK-RV32V-NEXT: vluxei32.v v24, (zero), v8
@@ -35906,8 +35931,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV64VC-NEXT: li a1, 40
; CHECK-RV64VC-NEXT: mv a0, s1
; CHECK-RV64VC-NEXT: call __muldi3
-; CHECK-RV64VC-NEXT: slli a7, s1, 2
-; CHECK-RV64VC-NEXT: sub a1, s0, a7
+; CHECK-RV64VC-NEXT: slli a6, s1, 2
+; CHECK-RV64VC-NEXT: sub a1, s0, a6
; CHECK-RV64VC-NEXT: sltu a2, s0, a1
; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: and a3, a2, a1
@@ -35915,54 +35940,54 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV64VC-NEXT: sub a2, a3, a1
; CHECK-RV64VC-NEXT: sltu a4, a3, a2
; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
-; CHECK-RV64VC-NEXT: sub t0, a2, s1
-; CHECK-RV64VC-NEXT: mv a5, a2
-; CHECK-RV64VC-NEXT: bltu a2, s1, .LBB912_2
+; CHECK-RV64VC-NEXT: and a4, a4, a2
+; CHECK-RV64VC-NEXT: sub a5, a4, s1
+; CHECK-RV64VC-NEXT: sltu a2, a4, a5
+; CHECK-RV64VC-NEXT: bltu a4, s1, .LBB912_2
; CHECK-RV64VC-NEXT: # %bb.1:
-; CHECK-RV64VC-NEXT: mv a5, s1
+; CHECK-RV64VC-NEXT: mv a4, s1
; CHECK-RV64VC-NEXT: .LBB912_2:
-; CHECK-RV64VC-NEXT: sltu a6, a2, t0
+; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: bltu a3, a1, .LBB912_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a3, a1
; CHECK-RV64VC-NEXT: .LBB912_4:
-; CHECK-RV64VC-NEXT: add a0, a0, s2
-; CHECK-RV64VC-NEXT: addi a6, a6, -1
+; CHECK-RV64VC-NEXT: add a7, s2, a0
+; CHECK-RV64VC-NEXT: and a0, a2, a5
; CHECK-RV64VC-NEXT: sub a2, a3, s1
-; CHECK-RV64VC-NEXT: sltu a4, a3, a2
-; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
+; CHECK-RV64VC-NEXT: sltu a5, a3, a2
+; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a5, a5, a2
; CHECK-RV64VC-NEXT: bltu a3, s1, .LBB912_6
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a3, s1
; CHECK-RV64VC-NEXT: .LBB912_6:
-; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v16, (a7)
+; CHECK-RV64VC-NEXT: addi a2, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 3
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 3
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 4
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 4
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
; CHECK-RV64VC-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64VC-NEXT: and a0, a6, t0
-; CHECK-RV64VC-NEXT: bltu s0, a7, .LBB912_8
+; CHECK-RV64VC-NEXT: bltu s0, a6, .LBB912_8
; CHECK-RV64VC-NEXT: # %bb.7:
-; CHECK-RV64VC-NEXT: mv s0, a7
+; CHECK-RV64VC-NEXT: mv s0, a6
; CHECK-RV64VC-NEXT: .LBB912_8:
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.s1
@@ -36053,32 +36078,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV32VC-NEXT: sub sp, sp, a1
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32VC-NEXT: addi a1, sp, 16
-; CHECK-RV32VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: csrr a4, vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32VC-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32VC-NEXT: slli a3, a4, 3
; CHECK-RV32VC-NEXT: slli a1, a4, 2
-; CHECK-RV32VC-NEXT: add a0, a0, a3
-; CHECK-RV32VC-NEXT: sub a3, a2, a1
-; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: sltu a0, a2, a3
-; CHECK-RV32VC-NEXT: addi a0, a0, -1
+; CHECK-RV32VC-NEXT: add a6, a0, a3
+; CHECK-RV32VC-NEXT: sub a0, a2, a1
+; CHECK-RV32VC-NEXT: sltu a3, a2, a0
+; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a3, a3, a0
; CHECK-RV32VC-NEXT: slli a0, a4, 1
; CHECK-RV32VC-NEXT: sub a4, a3, a0
; CHECK-RV32VC-NEXT: sltu a5, a3, a4
; CHECK-RV32VC-NEXT: addi a5, a5, -1
; CHECK-RV32VC-NEXT: and a4, a4, a5
+; CHECK-RV32VC-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32VC-NEXT: vle32.v v0, (a6)
; CHECK-RV32VC-NEXT: bltu a3, a0, .LBB912_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a3, a0
; CHECK-RV32VC-NEXT: .LBB912_2:
-; CHECK-RV32VC-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32VC-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.s1
; CHECK-RV32VC-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32VC-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.s1
-; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32VC-NEXT: bltu a2, a1, .LBB912_4
; CHECK-RV32VC-NEXT: # %bb.3:
; CHECK-RV32VC-NEXT: mv a2, a1
@@ -36087,6 +36113,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV32VC-NEXT: sltu a3, a2, a1
; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a1, a1, a3
+; CHECK-RV32VC-NEXT: addi a3, sp, 16
+; CHECK-RV32VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.s1
; CHECK-RV32VC-NEXT: vluxei32.v v26, (zero), v16
@@ -36094,8 +36122,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_S1(<vscale x 64 x
; CHECK-RV32VC-NEXT: # %bb.5:
; CHECK-RV32VC-NEXT: mv a2, a0
; CHECK-RV32VC-NEXT: .LBB912_6:
-; CHECK-RV32VC-NEXT: addi a0, sp, 16
-; CHECK-RV32VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.s1
; CHECK-RV32VC-NEXT: vluxei32.v v24, (zero), v8
@@ -36194,51 +36220,51 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV64V-NEXT: sub a4, a3, a1
; CHECK-RV64V-NEXT: sltu a5, a3, a4
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a6, a5, a4
-; CHECK-RV64V-NEXT: sub a4, a6, s1
-; CHECK-RV64V-NEXT: mv a5, a6
-; CHECK-RV64V-NEXT: bltu a6, s1, .LBB913_2
+; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: sub a5, a4, s1
+; CHECK-RV64V-NEXT: sltu a7, a4, a5
+; CHECK-RV64V-NEXT: bltu a4, s1, .LBB913_2
; CHECK-RV64V-NEXT: # %bb.1:
-; CHECK-RV64V-NEXT: mv a5, s1
+; CHECK-RV64V-NEXT: mv a4, s1
; CHECK-RV64V-NEXT: .LBB913_2:
-; CHECK-RV64V-NEXT: sltu a7, a6, a4
+; CHECK-RV64V-NEXT: addi a7, a7, -1
; CHECK-RV64V-NEXT: bltu a3, a1, .LBB913_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a3, a1
; CHECK-RV64V-NEXT: .LBB913_4:
; CHECK-RV64V-NEXT: add a6, s2, a0
-; CHECK-RV64V-NEXT: addi a0, a7, -1
-; CHECK-RV64V-NEXT: sub a7, a3, s1
-; CHECK-RV64V-NEXT: sltu t0, a3, a7
-; CHECK-RV64V-NEXT: addi t0, t0, -1
-; CHECK-RV64V-NEXT: and a7, t0, a7
+; CHECK-RV64V-NEXT: and a0, a7, a5
+; CHECK-RV64V-NEXT: sub a5, a3, s1
+; CHECK-RV64V-NEXT: sltu a7, a3, a5
+; CHECK-RV64V-NEXT: addi a7, a7, -1
+; CHECK-RV64V-NEXT: and a5, a7, a5
; CHECK-RV64V-NEXT: bltu a3, s1, .LBB913_6
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a3, s1
; CHECK-RV64V-NEXT: .LBB913_6:
-; CHECK-RV64V-NEXT: vl8re64.v v16, (a6)
+; CHECK-RV64V-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v16, (a6)
; CHECK-RV64V-NEXT: addi a6, sp, 16
; CHECK-RV64V-NEXT: vl8r.v v24, (a6) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 3
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a7, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 3
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 4
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 4
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64V-NEXT: and a0, a0, a4
; CHECK-RV64V-NEXT: bltu s0, a2, .LBB913_8
; CHECK-RV64V-NEXT: # %bb.7:
; CHECK-RV64V-NEXT: mv s0, a2
@@ -36332,32 +36358,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV32V-NEXT: sub sp, sp, a1
; CHECK-RV32V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32V-NEXT: addi a1, sp, 16
-; CHECK-RV32V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32V-NEXT: csrr a4, vlenb
-; CHECK-RV32V-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32V-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32V-NEXT: slli a3, a4, 3
; CHECK-RV32V-NEXT: slli a1, a4, 2
-; CHECK-RV32V-NEXT: add a0, a0, a3
-; CHECK-RV32V-NEXT: sub a3, a2, a1
-; CHECK-RV32V-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32V-NEXT: sltu a0, a2, a3
-; CHECK-RV32V-NEXT: addi a0, a0, -1
-; CHECK-RV32V-NEXT: and a3, a0, a3
+; CHECK-RV32V-NEXT: add a5, a0, a3
+; CHECK-RV32V-NEXT: sub a0, a2, a1
+; CHECK-RV32V-NEXT: sltu a3, a2, a0
+; CHECK-RV32V-NEXT: addi a3, a3, -1
+; CHECK-RV32V-NEXT: and a3, a3, a0
; CHECK-RV32V-NEXT: slli a0, a4, 1
; CHECK-RV32V-NEXT: sub a4, a3, a0
-; CHECK-RV32V-NEXT: sltu a5, a3, a4
-; CHECK-RV32V-NEXT: addi a5, a5, -1
-; CHECK-RV32V-NEXT: and a4, a5, a4
+; CHECK-RV32V-NEXT: sltu a6, a3, a4
+; CHECK-RV32V-NEXT: addi a6, a6, -1
+; CHECK-RV32V-NEXT: and a4, a6, a4
+; CHECK-RV32V-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32V-NEXT: vle32.v v0, (a5)
; CHECK-RV32V-NEXT: bltu a3, a0, .LBB913_2
; CHECK-RV32V-NEXT: # %bb.1:
; CHECK-RV32V-NEXT: mv a3, a0
; CHECK-RV32V-NEXT: .LBB913_2:
-; CHECK-RV32V-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32V-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
; CHECK-RV32V-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32V-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
-; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32V-NEXT: bltu a2, a1, .LBB913_4
; CHECK-RV32V-NEXT: # %bb.3:
; CHECK-RV32V-NEXT: mv a2, a1
@@ -36366,6 +36393,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV32V-NEXT: sltu a3, a2, a1
; CHECK-RV32V-NEXT: addi a3, a3, -1
; CHECK-RV32V-NEXT: and a1, a3, a1
+; CHECK-RV32V-NEXT: addi a3, sp, 16
+; CHECK-RV32V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
; CHECK-RV32V-NEXT: vluxei32.v v26, (zero), v16
@@ -36373,8 +36402,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV32V-NEXT: # %bb.5:
; CHECK-RV32V-NEXT: mv a2, a0
; CHECK-RV32V-NEXT: .LBB913_6:
-; CHECK-RV32V-NEXT: addi a0, sp, 16
-; CHECK-RV32V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
; CHECK-RV32V-NEXT: vluxei32.v v24, (zero), v8
@@ -36459,8 +36486,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV64VC-NEXT: li a1, 40
; CHECK-RV64VC-NEXT: mv a0, s1
; CHECK-RV64VC-NEXT: call __muldi3
-; CHECK-RV64VC-NEXT: slli a7, s1, 2
-; CHECK-RV64VC-NEXT: sub a1, s0, a7
+; CHECK-RV64VC-NEXT: slli a6, s1, 2
+; CHECK-RV64VC-NEXT: sub a1, s0, a6
; CHECK-RV64VC-NEXT: sltu a2, s0, a1
; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: and a3, a2, a1
@@ -36468,54 +36495,54 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV64VC-NEXT: sub a2, a3, a1
; CHECK-RV64VC-NEXT: sltu a4, a3, a2
; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
-; CHECK-RV64VC-NEXT: sub t0, a2, s1
-; CHECK-RV64VC-NEXT: mv a5, a2
-; CHECK-RV64VC-NEXT: bltu a2, s1, .LBB913_2
+; CHECK-RV64VC-NEXT: and a4, a4, a2
+; CHECK-RV64VC-NEXT: sub a5, a4, s1
+; CHECK-RV64VC-NEXT: sltu a2, a4, a5
+; CHECK-RV64VC-NEXT: bltu a4, s1, .LBB913_2
; CHECK-RV64VC-NEXT: # %bb.1:
-; CHECK-RV64VC-NEXT: mv a5, s1
+; CHECK-RV64VC-NEXT: mv a4, s1
; CHECK-RV64VC-NEXT: .LBB913_2:
-; CHECK-RV64VC-NEXT: sltu a6, a2, t0
+; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: bltu a3, a1, .LBB913_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a3, a1
; CHECK-RV64VC-NEXT: .LBB913_4:
-; CHECK-RV64VC-NEXT: add a0, a0, s2
-; CHECK-RV64VC-NEXT: addi a6, a6, -1
+; CHECK-RV64VC-NEXT: add a7, s2, a0
+; CHECK-RV64VC-NEXT: and a0, a2, a5
; CHECK-RV64VC-NEXT: sub a2, a3, s1
-; CHECK-RV64VC-NEXT: sltu a4, a3, a2
-; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
+; CHECK-RV64VC-NEXT: sltu a5, a3, a2
+; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a5, a5, a2
; CHECK-RV64VC-NEXT: bltu a3, s1, .LBB913_6
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a3, s1
; CHECK-RV64VC-NEXT: .LBB913_6:
-; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v16, (a7)
+; CHECK-RV64VC-NEXT: addi a2, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 3
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 3
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 4
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 4
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64VC-NEXT: and a0, a6, t0
-; CHECK-RV64VC-NEXT: bltu s0, a7, .LBB913_8
+; CHECK-RV64VC-NEXT: bltu s0, a6, .LBB913_8
; CHECK-RV64VC-NEXT: # %bb.7:
-; CHECK-RV64VC-NEXT: mv s0, a7
+; CHECK-RV64VC-NEXT: mv s0, a6
; CHECK-RV64VC-NEXT: .LBB913_8:
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
@@ -36606,32 +36633,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV32VC-NEXT: sub sp, sp, a1
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32VC-NEXT: addi a1, sp, 16
-; CHECK-RV32VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: csrr a4, vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32VC-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32VC-NEXT: slli a3, a4, 3
; CHECK-RV32VC-NEXT: slli a1, a4, 2
-; CHECK-RV32VC-NEXT: add a0, a0, a3
-; CHECK-RV32VC-NEXT: sub a3, a2, a1
-; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: sltu a0, a2, a3
-; CHECK-RV32VC-NEXT: addi a0, a0, -1
+; CHECK-RV32VC-NEXT: add a6, a0, a3
+; CHECK-RV32VC-NEXT: sub a0, a2, a1
+; CHECK-RV32VC-NEXT: sltu a3, a2, a0
+; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a3, a3, a0
; CHECK-RV32VC-NEXT: slli a0, a4, 1
; CHECK-RV32VC-NEXT: sub a4, a3, a0
; CHECK-RV32VC-NEXT: sltu a5, a3, a4
; CHECK-RV32VC-NEXT: addi a5, a5, -1
; CHECK-RV32VC-NEXT: and a4, a4, a5
+; CHECK-RV32VC-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32VC-NEXT: vle32.v v0, (a6)
; CHECK-RV32VC-NEXT: bltu a3, a0, .LBB913_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a3, a0
; CHECK-RV32VC-NEXT: .LBB913_2:
-; CHECK-RV32VC-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32VC-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
; CHECK-RV32VC-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32VC-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
-; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32VC-NEXT: bltu a2, a1, .LBB913_4
; CHECK-RV32VC-NEXT: # %bb.3:
; CHECK-RV32VC-NEXT: mv a2, a1
@@ -36640,6 +36668,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV32VC-NEXT: sltu a3, a2, a1
; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a1, a1, a3
+; CHECK-RV32VC-NEXT: addi a3, sp, 16
+; CHECK-RV32VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
; CHECK-RV32VC-NEXT: vluxei32.v v26, (zero), v16
@@ -36647,8 +36677,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_ALL(<vscale x 64 x
; CHECK-RV32VC-NEXT: # %bb.5:
; CHECK-RV32VC-NEXT: mv a2, a0
; CHECK-RV32VC-NEXT: .LBB913_6:
-; CHECK-RV32VC-NEXT: addi a0, sp, 16
-; CHECK-RV32VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
; CHECK-RV32VC-NEXT: vluxei32.v v24, (zero), v8
@@ -36746,51 +36774,51 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV64V-NEXT: sub a4, a3, a1
; CHECK-RV64V-NEXT: sltu a5, a3, a4
; CHECK-RV64V-NEXT: addi a5, a5, -1
-; CHECK-RV64V-NEXT: and a6, a5, a4
-; CHECK-RV64V-NEXT: sub a4, a6, s1
-; CHECK-RV64V-NEXT: mv a5, a6
-; CHECK-RV64V-NEXT: bltu a6, s1, .LBB914_2
+; CHECK-RV64V-NEXT: and a4, a5, a4
+; CHECK-RV64V-NEXT: sub a5, a4, s1
+; CHECK-RV64V-NEXT: sltu a7, a4, a5
+; CHECK-RV64V-NEXT: bltu a4, s1, .LBB914_2
; CHECK-RV64V-NEXT: # %bb.1:
-; CHECK-RV64V-NEXT: mv a5, s1
+; CHECK-RV64V-NEXT: mv a4, s1
; CHECK-RV64V-NEXT: .LBB914_2:
-; CHECK-RV64V-NEXT: sltu a7, a6, a4
+; CHECK-RV64V-NEXT: addi a7, a7, -1
; CHECK-RV64V-NEXT: bltu a3, a1, .LBB914_4
; CHECK-RV64V-NEXT: # %bb.3:
; CHECK-RV64V-NEXT: mv a3, a1
; CHECK-RV64V-NEXT: .LBB914_4:
; CHECK-RV64V-NEXT: add a6, s2, a0
-; CHECK-RV64V-NEXT: addi a0, a7, -1
-; CHECK-RV64V-NEXT: sub a7, a3, s1
-; CHECK-RV64V-NEXT: sltu t0, a3, a7
-; CHECK-RV64V-NEXT: addi t0, t0, -1
-; CHECK-RV64V-NEXT: and a7, t0, a7
+; CHECK-RV64V-NEXT: and a0, a7, a5
+; CHECK-RV64V-NEXT: sub a5, a3, s1
+; CHECK-RV64V-NEXT: sltu a7, a3, a5
+; CHECK-RV64V-NEXT: addi a7, a7, -1
+; CHECK-RV64V-NEXT: and a5, a7, a5
; CHECK-RV64V-NEXT: bltu a3, s1, .LBB914_6
; CHECK-RV64V-NEXT: # %bb.5:
; CHECK-RV64V-NEXT: mv a3, s1
; CHECK-RV64V-NEXT: .LBB914_6:
-; CHECK-RV64V-NEXT: vl8re64.v v16, (a6)
+; CHECK-RV64V-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64V-NEXT: vle64.v v16, (a6)
; CHECK-RV64V-NEXT: addi a6, sp, 16
; CHECK-RV64V-NEXT: vl8r.v v24, (a6) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 3
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; CHECK-RV64V-NEXT: vsetvli zero, a7, e8, m1, ta, ma
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 3
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64V-NEXT: csrr a5, vlenb
-; CHECK-RV64V-NEXT: slli a5, a5, 4
-; CHECK-RV64V-NEXT: add a5, sp, a5
-; CHECK-RV64V-NEXT: addi a5, a5, 16
-; CHECK-RV64V-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK-RV64V-NEXT: csrr a4, vlenb
+; CHECK-RV64V-NEXT: slli a4, a4, 4
+; CHECK-RV64V-NEXT: add a4, sp, a4
+; CHECK-RV64V-NEXT: addi a4, a4, 16
+; CHECK-RV64V-NEXT: vl8r.v v24, (a4) # vscale x 64-byte Folded Reload
; CHECK-RV64V-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64V-NEXT: ntl.all
; CHECK-RV64V-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64V-NEXT: and a0, a0, a4
; CHECK-RV64V-NEXT: bltu s0, a2, .LBB914_8
; CHECK-RV64V-NEXT: # %bb.7:
; CHECK-RV64V-NEXT: mv s0, a2
@@ -36884,32 +36912,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV32V-NEXT: sub sp, sp, a1
; CHECK-RV32V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32V-NEXT: addi a1, sp, 16
-; CHECK-RV32V-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32V-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32V-NEXT: csrr a4, vlenb
-; CHECK-RV32V-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32V-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32V-NEXT: slli a3, a4, 3
; CHECK-RV32V-NEXT: slli a1, a4, 2
-; CHECK-RV32V-NEXT: add a0, a0, a3
-; CHECK-RV32V-NEXT: sub a3, a2, a1
-; CHECK-RV32V-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32V-NEXT: sltu a0, a2, a3
-; CHECK-RV32V-NEXT: addi a0, a0, -1
-; CHECK-RV32V-NEXT: and a3, a0, a3
+; CHECK-RV32V-NEXT: add a5, a0, a3
+; CHECK-RV32V-NEXT: sub a0, a2, a1
+; CHECK-RV32V-NEXT: sltu a3, a2, a0
+; CHECK-RV32V-NEXT: addi a3, a3, -1
+; CHECK-RV32V-NEXT: and a3, a3, a0
; CHECK-RV32V-NEXT: slli a0, a4, 1
; CHECK-RV32V-NEXT: sub a4, a3, a0
-; CHECK-RV32V-NEXT: sltu a5, a3, a4
-; CHECK-RV32V-NEXT: addi a5, a5, -1
-; CHECK-RV32V-NEXT: and a4, a5, a4
+; CHECK-RV32V-NEXT: sltu a6, a3, a4
+; CHECK-RV32V-NEXT: addi a6, a6, -1
+; CHECK-RV32V-NEXT: and a4, a6, a4
+; CHECK-RV32V-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32V-NEXT: vle32.v v0, (a5)
; CHECK-RV32V-NEXT: bltu a3, a0, .LBB914_2
; CHECK-RV32V-NEXT: # %bb.1:
; CHECK-RV32V-NEXT: mv a3, a0
; CHECK-RV32V-NEXT: .LBB914_2:
-; CHECK-RV32V-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32V-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
; CHECK-RV32V-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32V-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
-; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32V-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32V-NEXT: bltu a2, a1, .LBB914_4
; CHECK-RV32V-NEXT: # %bb.3:
; CHECK-RV32V-NEXT: mv a2, a1
@@ -36918,6 +36947,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV32V-NEXT: sltu a3, a2, a1
; CHECK-RV32V-NEXT: addi a3, a3, -1
; CHECK-RV32V-NEXT: and a1, a3, a1
+; CHECK-RV32V-NEXT: addi a3, sp, 16
+; CHECK-RV32V-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
; CHECK-RV32V-NEXT: vluxei32.v v26, (zero), v16
@@ -36925,8 +36956,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV32V-NEXT: # %bb.5:
; CHECK-RV32V-NEXT: mv a2, a0
; CHECK-RV32V-NEXT: .LBB914_6:
-; CHECK-RV32V-NEXT: addi a0, sp, 16
-; CHECK-RV32V-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32V-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32V-NEXT: ntl.all
; CHECK-RV32V-NEXT: vluxei32.v v24, (zero), v8
@@ -37011,8 +37040,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV64VC-NEXT: li a1, 40
; CHECK-RV64VC-NEXT: mv a0, s1
; CHECK-RV64VC-NEXT: call __muldi3
-; CHECK-RV64VC-NEXT: slli a7, s1, 2
-; CHECK-RV64VC-NEXT: sub a1, s0, a7
+; CHECK-RV64VC-NEXT: slli a6, s1, 2
+; CHECK-RV64VC-NEXT: sub a1, s0, a6
; CHECK-RV64VC-NEXT: sltu a2, s0, a1
; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: and a3, a2, a1
@@ -37020,54 +37049,54 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV64VC-NEXT: sub a2, a3, a1
; CHECK-RV64VC-NEXT: sltu a4, a3, a2
; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
-; CHECK-RV64VC-NEXT: sub t0, a2, s1
-; CHECK-RV64VC-NEXT: mv a5, a2
-; CHECK-RV64VC-NEXT: bltu a2, s1, .LBB914_2
+; CHECK-RV64VC-NEXT: and a4, a4, a2
+; CHECK-RV64VC-NEXT: sub a5, a4, s1
+; CHECK-RV64VC-NEXT: sltu a2, a4, a5
+; CHECK-RV64VC-NEXT: bltu a4, s1, .LBB914_2
; CHECK-RV64VC-NEXT: # %bb.1:
-; CHECK-RV64VC-NEXT: mv a5, s1
+; CHECK-RV64VC-NEXT: mv a4, s1
; CHECK-RV64VC-NEXT: .LBB914_2:
-; CHECK-RV64VC-NEXT: sltu a6, a2, t0
+; CHECK-RV64VC-NEXT: addi a2, a2, -1
; CHECK-RV64VC-NEXT: bltu a3, a1, .LBB914_4
; CHECK-RV64VC-NEXT: # %bb.3:
; CHECK-RV64VC-NEXT: mv a3, a1
; CHECK-RV64VC-NEXT: .LBB914_4:
-; CHECK-RV64VC-NEXT: add a0, a0, s2
-; CHECK-RV64VC-NEXT: addi a6, a6, -1
+; CHECK-RV64VC-NEXT: add a7, s2, a0
+; CHECK-RV64VC-NEXT: and a0, a2, a5
; CHECK-RV64VC-NEXT: sub a2, a3, s1
-; CHECK-RV64VC-NEXT: sltu a4, a3, a2
-; CHECK-RV64VC-NEXT: addi a4, a4, -1
-; CHECK-RV64VC-NEXT: and a2, a2, a4
+; CHECK-RV64VC-NEXT: sltu a5, a3, a2
+; CHECK-RV64VC-NEXT: addi a5, a5, -1
+; CHECK-RV64VC-NEXT: and a5, a5, a2
; CHECK-RV64VC-NEXT: bltu a3, s1, .LBB914_6
; CHECK-RV64VC-NEXT: # %bb.5:
; CHECK-RV64VC-NEXT: mv a3, s1
; CHECK-RV64VC-NEXT: .LBB914_6:
-; CHECK-RV64VC-NEXT: vl8re64.v v16, (a0)
-; CHECK-RV64VC-NEXT: addi a0, sp, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-RV64VC-NEXT: vle64.v v16, (a7)
+; CHECK-RV64VC-NEXT: addi a2, sp, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v14, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 3
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-RV64VC-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 3
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v13, (zero), v24
-; CHECK-RV64VC-NEXT: csrr a0, vlenb
-; CHECK-RV64VC-NEXT: slli a0, a0, 4
-; CHECK-RV64VC-NEXT: add a0, a0, sp
-; CHECK-RV64VC-NEXT: addi a0, a0, 16
-; CHECK-RV64VC-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-RV64VC-NEXT: csrr a2, vlenb
+; CHECK-RV64VC-NEXT: slli a2, a2, 4
+; CHECK-RV64VC-NEXT: add a2, a2, sp
+; CHECK-RV64VC-NEXT: addi a2, a2, 16
+; CHECK-RV64VC-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; CHECK-RV64VC-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
; CHECK-RV64VC-NEXT: vluxei64.v v12, (zero), v24
-; CHECK-RV64VC-NEXT: and a0, a6, t0
-; CHECK-RV64VC-NEXT: bltu s0, a7, .LBB914_8
+; CHECK-RV64VC-NEXT: bltu s0, a6, .LBB914_8
; CHECK-RV64VC-NEXT: # %bb.7:
-; CHECK-RV64VC-NEXT: mv s0, a7
+; CHECK-RV64VC-NEXT: mv s0, a6
; CHECK-RV64VC-NEXT: .LBB914_8:
; CHECK-RV64VC-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-RV64VC-NEXT: c.ntl.all
@@ -37158,32 +37187,33 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV32VC-NEXT: sub sp, sp, a1
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-RV32VC-NEXT: addi a1, sp, 16
-; CHECK-RV32VC-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-RV32VC-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: csrr a4, vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v8, (a0)
+; CHECK-RV32VC-NEXT: vl8re32.v v16, (a0)
; CHECK-RV32VC-NEXT: slli a3, a4, 3
; CHECK-RV32VC-NEXT: slli a1, a4, 2
-; CHECK-RV32VC-NEXT: add a0, a0, a3
-; CHECK-RV32VC-NEXT: sub a3, a2, a1
-; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: sltu a0, a2, a3
-; CHECK-RV32VC-NEXT: addi a0, a0, -1
+; CHECK-RV32VC-NEXT: add a6, a0, a3
+; CHECK-RV32VC-NEXT: sub a0, a2, a1
+; CHECK-RV32VC-NEXT: sltu a3, a2, a0
+; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a3, a3, a0
; CHECK-RV32VC-NEXT: slli a0, a4, 1
; CHECK-RV32VC-NEXT: sub a4, a3, a0
; CHECK-RV32VC-NEXT: sltu a5, a3, a4
; CHECK-RV32VC-NEXT: addi a5, a5, -1
; CHECK-RV32VC-NEXT: and a4, a4, a5
+; CHECK-RV32VC-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-RV32VC-NEXT: vle32.v v0, (a6)
; CHECK-RV32VC-NEXT: bltu a3, a0, .LBB914_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a3, a0
; CHECK-RV32VC-NEXT: .LBB914_2:
-; CHECK-RV32VC-NEXT: vsetvli zero, a4, e8, m2, ta, ma
+; CHECK-RV32VC-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
; CHECK-RV32VC-NEXT: vluxei32.v v30, (zero), v0
; CHECK-RV32VC-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
-; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v8
+; CHECK-RV32VC-NEXT: vluxei32.v v28, (zero), v16
; CHECK-RV32VC-NEXT: bltu a2, a1, .LBB914_4
; CHECK-RV32VC-NEXT: # %bb.3:
; CHECK-RV32VC-NEXT: mv a2, a1
@@ -37192,6 +37222,8 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV32VC-NEXT: sltu a3, a2, a1
; CHECK-RV32VC-NEXT: addi a3, a3, -1
; CHECK-RV32VC-NEXT: and a1, a1, a3
+; CHECK-RV32VC-NEXT: addi a3, sp, 16
+; CHECK-RV32VC-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
; CHECK-RV32VC-NEXT: vluxei32.v v26, (zero), v16
@@ -37199,8 +37231,6 @@ define <vscale x 64 x i8> @test_nontemporal_vp_gather_nxv64i8_DEFAULT(<vscale x
; CHECK-RV32VC-NEXT: # %bb.5:
; CHECK-RV32VC-NEXT: mv a2, a0
; CHECK-RV32VC-NEXT: .LBB914_6:
-; CHECK-RV32VC-NEXT: addi a0, sp, 16
-; CHECK-RV32VC-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-RV32VC-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-RV32VC-NEXT: c.ntl.all
; CHECK-RV32VC-NEXT: vluxei32.v v24, (zero), v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 9e0842c83f9ad..e420d49cab81e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -3487,23 +3487,25 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: slli a1, a1, 3
; ZVFH-NEXT: sub sp, sp, a1
; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFH-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; ZVFH-NEXT: vmv1r.v v7, v0
; ZVFH-NEXT: addi a1, sp, 16
; ZVFH-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFH-NEXT: csrr a3, vlenb
-; ZVFH-NEXT: srli a1, a3, 1
-; ZVFH-NEXT: slli a4, a3, 3
-; ZVFH-NEXT: slli a3, a3, 2
+; ZVFH-NEXT: csrr a4, vlenb
+; ZVFH-NEXT: slli a3, a4, 2
+; ZVFH-NEXT: sub a1, a2, a3
+; ZVFH-NEXT: sltu a5, a2, a1
+; ZVFH-NEXT: addi a5, a5, -1
+; ZVFH-NEXT: and a5, a5, a1
+; ZVFH-NEXT: srli a1, a4, 1
+; ZVFH-NEXT: slli a4, a4, 3
; ZVFH-NEXT: add a4, a0, a4
-; ZVFH-NEXT: sub a5, a2, a3
-; ZVFH-NEXT: vl8re16.v v24, (a4)
-; ZVFH-NEXT: sltu a4, a2, a5
-; ZVFH-NEXT: addi a4, a4, -1
+; ZVFH-NEXT: vsetvli zero, a5, e16, m8, ta, ma
+; ZVFH-NEXT: vle16.v v24, (a4)
; ZVFH-NEXT: vl8re16.v v8, (a0)
+; ZVFH-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; ZVFH-NEXT: vslidedown.vx v0, v0, a1
-; ZVFH-NEXT: and a4, a4, a5
-; ZVFH-NEXT: vsetvli zero, a4, e16, m8, ta, ma
+; ZVFH-NEXT: vsetvli zero, a5, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v6, v16, v24, v0.t
; ZVFH-NEXT: bltu a2, a3, .LBB171_2
; ZVFH-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index ceeeba466014c..7f0c9026ced5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1073,21 +1073,21 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vlm.v v0, (a2)
; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: sub a4, a3, a1
-; CHECK-NEXT: vl8r.v v24, (a2)
-; CHECK-NEXT: sltu a2, a3, a4
+; CHECK-NEXT: sltu a5, a3, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: add a5, a0, a1
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a2)
+; CHECK-NEXT: vle8.v v24, (a5)
; CHECK-NEXT: vl8r.v v8, (a0)
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a4
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v6, v16, v24, v0.t
; CHECK-NEXT: bltu a3, a1, .LBB96_2
; CHECK-NEXT: # %bb.1:
@@ -1114,20 +1114,20 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub a3, a2, a1
-; CHECK-NEXT: sltu a4, a2, a3
-; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t
-; CHECK-NEXT: bltu a2, a1, .LBB97_2
+; CHECK-NEXT: bltu a2, a3, .LBB97_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB97_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
@@ -1144,20 +1144,20 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub a3, a2, a1
-; CHECK-NEXT: sltu a4, a2, a3
-; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v25, v16, a0, v0.t
-; CHECK-NEXT: bltu a2, a1, .LBB98_2
+; CHECK-NEXT: bltu a2, a3, .LBB98_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB98_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
@@ -2203,23 +2203,25 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: srli a1, a3, 2
-; CHECK-NEXT: slli a4, a3, 3
-; CHECK-NEXT: slli a3, a3, 1
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a3, a4, 1
+; CHECK-NEXT: sub a1, a2, a3
+; CHECK-NEXT: sltu a5, a2, a1
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a5, a5, a1
+; CHECK-NEXT: srli a1, a4, 2
+; CHECK-NEXT: slli a4, a4, 3
; CHECK-NEXT: add a4, a0, a4
-; CHECK-NEXT: sub a5, a2, a3
-; CHECK-NEXT: vl8re32.v v24, (a4)
-; CHECK-NEXT: sltu a4, a2, a5
-; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: vsetvli zero, a5, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v24, (a4)
; CHECK-NEXT: vl8re32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: and a4, a4, a5
-; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vv v6, v16, v24, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB189_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index c64b755051898..b755d47796912 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -545,20 +545,20 @@ define <vscale x 64 x i8> @vadd_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a3, a1, a2
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
-; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index 9fb9b508d76b0..314eb4c45cf00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -37,9 +37,9 @@ define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vsc
; VLDEP-NEXT: vmv.v.i v8, 0
; VLDEP-NEXT: csrr a0, vlenb
; VLDEP-NEXT: vmerge.vim v10, v8, 1, v0
+; VLDEP-NEXT: srli a0, a0, 3
; VLDEP-NEXT: vmv1r.v v0, v9
; VLDEP-NEXT: vmerge.vim v8, v8, 1, v0
-; VLDEP-NEXT: srli a0, a0, 3
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
@@ -121,9 +121,9 @@ define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vsc
; VLDEP-NEXT: vmv.v.i v8, 0
; VLDEP-NEXT: csrr a0, vlenb
; VLDEP-NEXT: vmerge.vim v10, v8, 1, v0
+; VLDEP-NEXT: srli a0, a0, 2
; VLDEP-NEXT: vmv1r.v v0, v9
; VLDEP-NEXT: vmerge.vim v8, v8, 1, v0
-; VLDEP-NEXT: srli a0, a0, 2
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
@@ -205,9 +205,9 @@ define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vsc
; VLDEP-NEXT: vmv.v.i v8, 0
; VLDEP-NEXT: csrr a0, vlenb
; VLDEP-NEXT: vmerge.vim v10, v8, 1, v0
+; VLDEP-NEXT: srli a0, a0, 1
; VLDEP-NEXT: vmv1r.v v0, v9
; VLDEP-NEXT: vmerge.vim v8, v8, 1, v0
-; VLDEP-NEXT: srli a0, a0, 1
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
@@ -369,9 +369,9 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <
; VLDEP-NEXT: vmv.v.i v10, 0
; VLDEP-NEXT: csrr a0, vlenb
; VLDEP-NEXT: vmerge.vim v12, v10, 1, v0
+; VLDEP-NEXT: slli a0, a0, 1
; VLDEP-NEXT: vmv1r.v v0, v9
; VLDEP-NEXT: vmerge.vim v8, v10, 1, v0
-; VLDEP-NEXT: slli a0, a0, 1
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
@@ -453,9 +453,9 @@ define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <
; VLDEP-NEXT: vmv.v.i v12, 0
; VLDEP-NEXT: csrr a0, vlenb
; VLDEP-NEXT: vmerge.vim v16, v12, 1, v0
+; VLDEP-NEXT: slli a0, a0, 2
; VLDEP-NEXT: vmv1r.v v0, v9
; VLDEP-NEXT: vmerge.vim v8, v12, 1, v0
-; VLDEP-NEXT: slli a0, a0, 2
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e8, m4, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
@@ -537,9 +537,9 @@ define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <
; VLDEP-NEXT: vmv.v.i v16, 0
; VLDEP-NEXT: csrr a0, vlenb
; VLDEP-NEXT: vmerge.vim v24, v16, 1, v0
+; VLDEP-NEXT: slli a0, a0, 3
; VLDEP-NEXT: vmv1r.v v0, v9
; VLDEP-NEXT: vmerge.vim v8, v16, 1, v0
-; VLDEP-NEXT: slli a0, a0, 3
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index 318c6bb81d68c..e669518fe167d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -3408,7 +3408,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -3424,61 +3424,63 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: slli a5, a1, 3
-; CHECK-NEXT: sub a6, a4, a1
-; CHECK-NEXT: add a7, a2, a5
-; CHECK-NEXT: add a5, a0, a5
-; CHECK-NEXT: vl8re64.v v8, (a7)
-; CHECK-NEXT: csrr a7, vlenb
-; CHECK-NEXT: slli a7, a7, 3
-; CHECK-NEXT: add a7, sp, a7
-; CHECK-NEXT: addi a7, a7, 16
-; CHECK-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: sltu a7, a4, a6
-; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: vl8re64.v v8, (a5)
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 3
-; CHECK-NEXT: mv t0, a5
-; CHECK-NEXT: slli a5, a5, 2
-; CHECK-NEXT: add a5, a5, t0
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: slli a3, a1, 3
+; CHECK-NEXT: sub a5, a4, a1
+; CHECK-NEXT: add a6, a0, a3
+; CHECK-NEXT: vl8re64.v v8, (a6)
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: slli a6, a6, 3
+; CHECK-NEXT: mv a7, a6
+; CHECK-NEXT: slli a6, a6, 2
+; CHECK-NEXT: add a6, a6, a7
+; CHECK-NEXT: add a6, sp, a6
+; CHECK-NEXT: addi a6, a6, 16
+; CHECK-NEXT: vs8r.v v8, (a6) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: sltu a6, a4, a5
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a5, a6, a5
+; CHECK-NEXT: srli a6, a1, 3
+; CHECK-NEXT: add a3, a2, a3
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vl8re64.v v8, (a0)
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: and a0, a7, a6
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: mv a3, a2
-; CHECK-NEXT: slli a2, a2, 1
-; CHECK-NEXT: add a2, a2, a3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a3)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: mv a3, a2
-; CHECK-NEXT: slli a2, a2, 2
-; CHECK-NEXT: add a2, a2, a3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a6
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -3535,73 +3537,47 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 5
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vl8re64.v v8, (a2)
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: add a5, a2, a3
-; CHECK-NEXT: vl8re64.v v8, (a5)
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 3
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
; CHECK-NEXT: sub a5, a4, a1
+; CHECK-NEXT: add a2, a2, a3
; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v24, (a3)
+; CHECK-NEXT: vl8re64.v v8, (a3)
; CHECK-NEXT: sltu a3, a4, a5
-; CHECK-NEXT: vl8re64.v v8, (a2)
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a3, a3, a5
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-NEXT: vfmadd.vv v24, v16, v8
+; CHECK-NEXT: vle64.v v24, (a2)
+; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: vfmacc.vv v24, v16, v8
; CHECK-NEXT: bltu a4, a1, .LBB129_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB129_2:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
-; CHECK-NEXT: vfmadd.vv v0, v16, v8
+; CHECK-NEXT: vfmadd.vv v0, v8, v16
; CHECK-NEXT: vmv.v.v v8, v0
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 42cde7d5f41bf..c3345c58ff373 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1078,7 +1078,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
@@ -1092,55 +1092,57 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: slli a5, a1, 3
-; CHECK-NEXT: sub a6, a4, a1
-; CHECK-NEXT: add a7, a2, a5
-; CHECK-NEXT: add a5, a0, a5
-; CHECK-NEXT: vl8re64.v v8, (a7)
-; CHECK-NEXT: csrr a7, vlenb
-; CHECK-NEXT: slli a7, a7, 3
-; CHECK-NEXT: add a7, sp, a7
-; CHECK-NEXT: addi a7, a7, 16
-; CHECK-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: sltu a7, a4, a6
-; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: vl8re64.v v8, (a5)
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: li t0, 40
-; CHECK-NEXT: mul a5, a5, t0
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: slli a3, a1, 3
+; CHECK-NEXT: sub a5, a4, a1
+; CHECK-NEXT: add a6, a0, a3
+; CHECK-NEXT: vl8re64.v v8, (a6)
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: li a7, 40
+; CHECK-NEXT: mul a6, a6, a7
+; CHECK-NEXT: add a6, sp, a6
+; CHECK-NEXT: addi a6, a6, 16
+; CHECK-NEXT: vs8r.v v8, (a6) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: sltu a6, a4, a5
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a5, a6, a5
+; CHECK-NEXT: srli a6, a1, 3
+; CHECK-NEXT: add a3, a2, a3
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vl8re64.v v8, (a0)
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: and a0, a7, a6
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a2, a2, a3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a3)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: li a3, 40
-; CHECK-NEXT: mul a2, a2, a3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a6
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 40
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 40
@@ -1191,69 +1193,47 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 5
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vl8re64.v v8, (a2)
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: add a5, a2, a3
-; CHECK-NEXT: vl8re64.v v8, (a5)
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 3
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # vscale x 64-byte Folded Spill
; CHECK-NEXT: sub a5, a4, a1
+; CHECK-NEXT: add a2, a2, a3
; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: vl8re64.v v24, (a3)
+; CHECK-NEXT: vl8re64.v v8, (a3)
; CHECK-NEXT: sltu a3, a4, a5
-; CHECK-NEXT: vl8re64.v v8, (a2)
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a3, a3, a5
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-NEXT: vfmadd.vv v24, v16, v8
+; CHECK-NEXT: vle64.v v24, (a2)
+; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: vfmacc.vv v24, v16, v8
; CHECK-NEXT: bltu a4, a1, .LBB93_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB93_2:
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
-; CHECK-NEXT: vfmadd.vv v0, v16, v8
+; CHECK-NEXT: vfmadd.vv v0, v8, v16
; CHECK-NEXT: vmv.v.v v8, v0
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 4177672b3a306..ebdef45ae1920 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -119,7 +119,7 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
@@ -143,10 +143,13 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: addi a7, a7, -1
; CHECK-NEXT: and a4, a7, a4
; CHECK-NEXT: srli a7, a1, 2
-; CHECK-NEXT: vl8re64.v v8, (a6)
-; CHECK-NEXT: vslidedown.vx v24, v0, a7
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a6)
; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v0, a5
+; CHECK-NEXT: vsetvli a6, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v24, v0, a7
+; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a5
; CHECK-NEXT: bltu a0, a1, .LBB8_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index d340c70d47c35..1fe6d3076d58c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -652,8 +652,8 @@ body: |
; CHECK-LABEL: name: vleff_reg_doesnt_dominate
; CHECK: liveins: $x8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 3 /* e8 */, 3 /* ta, ma */
; CHECK-NEXT: %avl:gprnox0 = COPY $x8
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, %avl /* vl */, 3 /* e8 */, 3 /* ta, ma */
; CHECK-NEXT: %y:vr, %vl:gprnox0 = PseudoVLE8FF_V_M1 $noreg, $noreg, %avl /* vl */, 3 /* e8 */, 3 /* ta, ma */
; CHECK-NEXT: PseudoVSE8_V_M1 %x, $noreg, %vl /* vl */, 3 /* e8 */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 3 /* ta, ma */
@@ -848,8 +848,8 @@ body: |
; CHECK-LABEL: name: vl_doesnt_dominate_sink
; CHECK: liveins: $x1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
@@ -865,8 +865,8 @@ body: |
; CHECK-LABEL: name: vl_doesnt_dominate_sink_load
; CHECK: liveins: $x1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 1)
; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 1)
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size)
@@ -901,8 +901,8 @@ body: |
; CHECK-LABEL: name: vl_doesnt_dominate_can_sink_vxsat
; CHECK: liveins: $x1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */, implicit-def dead $vxsat
; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */, implicit-def dead $vxsat
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: $v8 = COPY %y
%x:vr = PseudoVSADDU_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */, implicit-def dead $vxsat
@@ -929,3 +929,41 @@ body: |
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
$v8 = COPY %y
...
+---
+name: vl_doesnt_dominate_cant_sink_other_user
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: vl_doesnt_dominate_cant_sink_other_user
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1, 5 /* e32 */, 0
+ %vl:gprnox0 = COPY $x1
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %z
+...
+---
+name: vl_doesnt_dominate_can_sink_other_user
+body: |
+ bb.0:
+ liveins: $x1
+ ; CHECK-LABEL: name: vl_doesnt_dominate_can_sink_other_user
+ ; CHECK: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %z
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0
+ %vl:gprnox0 = COPY $x1
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1, 5 /* e32 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %z
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index 99f3cf9912380..ebd92b3f4c8b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -387,20 +387,20 @@ define <vscale x 64 x i8> @vmax_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub a3, a2, a1
-; CHECK-NEXT: sltu a4, a2, a3
-; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a2, a1, .LBB34_2
+; CHECK-NEXT: bltu a2, a3, .LBB34_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB34_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index ea9cf170dd35d..496af57f55c2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -389,20 +389,20 @@ define <vscale x 64 x i8> @vmaxu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub a3, a2, a1
-; CHECK-NEXT: sltu a4, a2, a3
-; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a2, a1, .LBB34_2
+; CHECK-NEXT: bltu a2, a3, .LBB34_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB34_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 4c6995f3b6b3f..3460fb11043c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -387,20 +387,20 @@ define <vscale x 64 x i8> @vmin_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub a3, a2, a1
-; CHECK-NEXT: sltu a4, a2, a3
-; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a2, a1, .LBB34_2
+; CHECK-NEXT: bltu a2, a3, .LBB34_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB34_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 75b5aa84fc42f..ca941a223c196 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -389,20 +389,20 @@ define <vscale x 64 x i8> @vminu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub a3, a2, a1
-; CHECK-NEXT: sltu a4, a2, a3
-; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a2, a1, .LBB34_2
+; CHECK-NEXT: bltu a2, a3, .LBB34_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB34_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll
index 20a055845ee43..36d1aee693093 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll
@@ -416,9 +416,9 @@ define i32 @masked_load_store_factor2_v2_shared_mask_extract(<vscale x 2 x i1> %
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; RV64-NEXT: vslideup.vx v10, v9, a2
; RV64-NEXT: slli a2, a1, 33
-; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: srli a2, a2, 32
-; RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
+; RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
+; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vle32.v v10, (a0), v0.t
; RV64-NEXT: li a2, 32
; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -550,6 +550,7 @@ define {<vscale x 2 x i32>, <vscale x 2 x i32>} @not_same_mask(<vscale x 2 x i1>
; RV64-NEXT: vwaddu.vv v12, v9, v11
; RV64-NEXT: vwmaccu.vx v12, a2, v11
; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a1, a1, 33
; RV64-NEXT: srli a2, a2, 2
; RV64-NEXT: vmsne.vi v0, v12, 0
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -557,13 +558,12 @@ define {<vscale x 2 x i32>, <vscale x 2 x i32>} @not_same_mask(<vscale x 2 x i1>
; RV64-NEXT: vmerge.vim v10, v10, 1, v0
; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT: vmsne.vi v0, v9, 0
-; RV64-NEXT: slli a1, a1, 33
+; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; RV64-NEXT: vslideup.vx v10, v8, a2
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; RV64-NEXT: vmsne.vi v0, v10, 0
-; RV64-NEXT: srli a1, a1, 32
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vle32.v v10, (a0), v0.t
; RV64-NEXT: li a0, 32
; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index a075bba81d3c6..2ce108a8fe182 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -2434,19 +2434,20 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
;
; RV64-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v12, v0
-; RV64-NEXT: vsext.vf4 v16, v10
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: srli a4, a2, 3
-; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a4
; RV64-NEXT: sltu a4, a1, a3
; RV64-NEXT: addi a4, a4, -1
; RV64-NEXT: and a3, a4, a3
+; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf4 v16, v10
+; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v24, v8
@@ -2491,19 +2492,20 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
;
; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v12, v0
-; RV64-NEXT: vsext.vf4 v16, v10
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: srli a4, a2, 3
-; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a4
; RV64-NEXT: sltu a4, a1, a3
; RV64-NEXT: addi a4, a4, -1
; RV64-NEXT: and a3, a4, a3
+; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf4 v16, v10
+; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v24, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index f92ee37051840..bae48bb580a31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -537,21 +537,22 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vlm.v v0, (a2)
; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: sub a4, a3, a1
+; CHECK-NEXT: sltu a5, a3, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a2)
+; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: vl8r.v v24, (a2)
-; CHECK-NEXT: sltu a2, a3, a4
; CHECK-NEXT: vl8r.v v8, (a0)
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a4
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v24, v24, v16, v0
; CHECK-NEXT: bltu a3, a1, .LBB35_2
; CHECK-NEXT: # %bb.1:
@@ -577,20 +578,21 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vx_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: sub a4, a2, a3
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub a3, a2, a1
-; CHECK-NEXT: sltu a4, a2, a3
-; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vxm v16, v16, a0, v0
-; CHECK-NEXT: bltu a2, a1, .LBB36_2
+; CHECK-NEXT: bltu a2, a3, .LBB36_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB36_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
@@ -605,20 +607,21 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a3, a1, a2
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vim v16, v16, 2, v0
-; CHECK-NEXT: bltu a1, a0, .LBB37_2
+; CHECK-NEXT: bltu a1, a2, .LBB37_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB37_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index 98634fe55de41..07a9384ac4bae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -551,20 +551,20 @@ define <vscale x 64 x i8> @vsadd_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsadd_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a3, a1, a2
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
-; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index a7d304261f87f..5750daa21c6e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -550,20 +550,20 @@ define <vscale x 64 x i8> @vsaddu_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i3
define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsaddu_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a3, a1, a2
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
-; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index c05f209a1bb0e..8f00bd642feb2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -303,20 +303,22 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a4, a3, 3
; CHECK-NEXT: slli a1, a3, 1
+; CHECK-NEXT: sub a4, a2, a1
+; CHECK-NEXT: sltu a5, a2, a4
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: slli a5, a3, 3
+; CHECK-NEXT: add a5, a0, a5
+; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
+; CHECK-NEXT: vle32.v v24, (a5)
; CHECK-NEXT: srli a3, a3, 2
-; CHECK-NEXT: add a4, a0, a4
-; CHECK-NEXT: sub a5, a2, a1
-; CHECK-NEXT: vl8re32.v v24, (a4)
-; CHECK-NEXT: sltu a4, a2, a5
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vl8re32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: and a4, a4, a5
; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: bltu a2, a1, .LBB27_2
@@ -343,21 +345,22 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; RV32-LABEL: select_evl_nxv32i32:
; RV32: # %bb.0:
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 3
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: vle32.v v24, (a0)
-; RV32-NEXT: vmerge.vvm v8, v24, v8, v0
-; RV32-NEXT: slli a2, a1, 3
+; RV32-NEXT: slli a3, a1, 1
; RV32-NEXT: add a0, a0, a2
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: vl8re32.v v24, (a0)
-; RV32-NEXT: srli a0, a1, 2
-; RV32-NEXT: sub a2, a1, a2
-; RV32-NEXT: sltu a1, a1, a2
-; RV32-NEXT: addi a1, a1, -1
-; RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a0
-; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: sub a2, a1, a3
+; RV32-NEXT: sltu a3, a1, a2
+; RV32-NEXT: addi a3, a3, -1
+; RV32-NEXT: vmerge.vvm v8, v24, v8, v0
+; RV32-NEXT: and a2, a3, a2
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vle32.v v24, (a0)
+; RV32-NEXT: srli a1, a1, 2
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v16, v24, v16, v0
; RV32-NEXT: ret
;
@@ -371,20 +374,22 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v7, v0
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a3, a1, 3
; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: sub a3, a1, a2
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
+; RV64-NEXT: slli a4, a1, 3
+; RV64-NEXT: add a4, a0, a4
+; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV64-NEXT: vle32.v v24, (a4)
; RV64-NEXT: srli a4, a1, 2
-; RV64-NEXT: add a3, a0, a3
-; RV64-NEXT: sub a5, a1, a2
-; RV64-NEXT: vl8re32.v v24, (a3)
-; RV64-NEXT: sltu a3, a1, a5
-; RV64-NEXT: addi a3, a3, -1
; RV64-NEXT: vl8re32.v v8, (a0)
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a4
-; RV64-NEXT: and a3, a3, a5
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vmerge.vvm v16, v24, v16, v0
; RV64-NEXT: bltu a1, a2, .LBB28_2
@@ -632,20 +637,22 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: sub a4, a2, a1
-; CHECK-NEXT: add a3, a0, a3
-; CHECK-NEXT: sltu a5, a2, a4
-; CHECK-NEXT: vl8re64.v v24, (a3)
-; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: srli a3, a1, 3
+; CHECK-NEXT: sub a3, a2, a1
+; CHECK-NEXT: sltu a4, a2, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: slli a4, a1, 3
+; CHECK-NEXT: add a4, a0, a4
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v24, (a4)
+; CHECK-NEXT: srli a4, a1, 3
; CHECK-NEXT: vl8re64.v v8, (a0)
-; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: and a4, a5, a4
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: bltu a2, a1, .LBB50_2
; CHECK-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 66d057c972c92..a3bdf4de5d46c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -567,10 +567,10 @@ bb:
define void @add_v128i8(ptr %x, ptr %y) vscale_range(2,2) {
; CHECK-LABEL: add_v128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl8r.v v8, (a0)
-; CHECK-NEXT: vl8r.v v16, (a1)
+; CHECK-NEXT: vl8r.v v8, (a1)
+; CHECK-NEXT: vl8r.v v16, (a0)
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vadd.vv v8, v16, v8
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%a = load <128 x i8>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index 0ac2ef7e251c0..5b5fe3dd1c10f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -568,25 +568,25 @@ define <vscale x 64 x i8> @vssub_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssub_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a2
-; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a3, a1, a2
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v16, v16, a2, v0.t
-; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index bde279a4d1f2b..7807b7ac43460 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -566,25 +566,25 @@ define <vscale x 64 x i8> @vssubu_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i3
define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vssubu_vi_nxv128i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub a2, a1, a0
-; CHECK-NEXT: sltu a3, a1, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a2
-; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a3, a1, a2
+; CHECK-NEXT: sltu a4, a1, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a3, a4, a3
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v16, v16, a2, v0.t
-; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index 0c1ca369521f7..6414717ff74a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -264,7 +264,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
@@ -288,10 +288,13 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: addi a7, a7, -1
; CHECK-NEXT: and a4, a7, a4
; CHECK-NEXT: srli a7, a1, 2
-; CHECK-NEXT: vl8re64.v v8, (a6)
-; CHECK-NEXT: vslidedown.vx v24, v0, a7
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a6)
; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v0, a5
+; CHECK-NEXT: vsetvli a6, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v24, v0, a7
+; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a5
; CHECK-NEXT: bltu a0, a1, .LBB17_2
; CHECK-NEXT: # %bb.1:
From 4dc5ba73b1a46458facc438e7c30642ec659b5a3 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 3 Mar 2026 14:57:16 +0800
Subject: [PATCH 3/5] Only check VL dominates users in same BB
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 6 ++-
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 43 ++++++++++++++++++++++
2 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 14f10f9b11128..090e126ec26d7 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1261,8 +1261,12 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI,
};
if (!VLDominates(MI)) {
assert(MI.getNumExplicitDefs() == 1);
+ auto Uses = MRI->use_instructions(MI.getOperand(0).getReg());
+ auto UsesSameBB = make_filter_range(Uses, [&MI](MachineInstr &Use) {
+ return Use.getParent() == MI.getParent();
+ });
if (VLMI->getParent() == MI.getParent() &&
- all_of(MRI->use_instructions(MI.getOperand(0).getReg()), VLDominates) &&
+ all_of(UsesSameBB, VLDominates) &&
RISCVInstrInfo::isSafeToMove(MI, *VLMI->getNextNode())) {
MI.moveBefore(VLMI->getNextNode());
} else {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 1fe6d3076d58c..2bd5d2109d797 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -967,3 +967,46 @@ body: |
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
$v8 = COPY %z
...
+---
+# Although %vl doesn't dominate all uses of %x, we only need it to dominate the uses of %x in %bb.1 in order to sink it.
+name: vl_doesnt_dominate_sink_use_in_other_block
+body: |
+ ; CHECK-LABEL: name: vl_doesnt_dominate_sink_use_in_other_block
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: BEQ $x1, $x0, %bb.1
+ ; CHECK-NEXT: PseudoBR %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 1)
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: $v8 = COPY %y
+ ; CHECK-NEXT: PseudoBR %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: PseudoBR %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ bb.0:
+ liveins: $x1
+ BEQ $x1, $x0, %bb.1
+ PseudoBR %bb.2
+ bb.1:
+ %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size)
+ %vl:gprnox0 = COPY $x1
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
+ $v8 = COPY %y
+ PseudoBR %bb.3
+ bb.2:
+ PseudoBR %bb.3
+ bb.3:
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 5 /* e32 */, 0
+...
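
A minimal standalone sketch of the same-block filtering this patch introduces in tryReduceVL. Block and Instr below are hypothetical stand-ins for MachineBasicBlock and MachineInstr, and dominance is simulated with a plain predicate, so this models the check rather than reproducing LLVM's API:

#include <algorithm>
#include <vector>

struct Block {};

struct Instr {
  Block *Parent;
  Block *getParent() const { return Parent; }
};

// Only users in Def's own block can constrain the sink; users in other
// blocks are reached through the CFG and are unaffected by where Def
// sits within its block.
template <typename Pred>
bool sameBlockUsersAllSatisfy(const Instr &Def,
                              const std::vector<const Instr *> &Users,
                              Pred VLDominates) {
  return std::all_of(Users.begin(), Users.end(), [&](const Instr *U) {
    return U->getParent() != Def.getParent() || VLDominates(*U);
  });
}

int main() {
  Block BB1, BB3;
  Instr Def{&BB1};           // plays the role of %x in the test above
  Instr SameBlockUse{&BB1};  // like %y, which the VL def must dominate
  Instr OtherBlockUse{&BB3}; // like %z in %bb.3, ignored by the check
  std::vector<const Instr *> Users = {&SameBlockUse, &OtherBlockUse};
  // Simulated dominance: pretend the VL definition dominates only
  // SameBlockUse's position in the block.
  auto VLDominates = [&](const Instr &I) { return &I == &SameBlockUse; };
  return sameBlockUsersAllSatisfy(Def, Users, VLDominates) ? 0 : 1;
}

This is why %z in %bb.3 above does not prevent sinking %x below %vl: only the same-block users feed the all_of, and isSafeToMove still guards the actual move.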
From 70e7fb2f52b7a0849bff99b2c3e7297350dec896 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 3 Mar 2026 15:01:07 +0800
Subject: [PATCH 4/5] Use a phi to maintain SSA
---
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 2bd5d2109d797..9db7221357134 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -970,6 +970,7 @@ body: |
---
# Although %vl doesn't dominate all uses of %x, we only need it to dominate the uses of %x in %bb.1 in order to sink it.
name: vl_doesnt_dominate_sink_use_in_other_block
+tracksRegLiveness: true
body: |
; CHECK-LABEL: name: vl_doesnt_dominate_sink_use_in_other_block
; CHECK: bb.0:
@@ -981,8 +982,9 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $x2
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %vl:gprnox0 = COPY $x2
; CHECK-NEXT: %x:vr = PseudoVLE32_V_M1 $noreg, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size, align 1)
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl /* vl */, 5 /* e32 */, 0 /* tu, mu */
; CHECK-NEXT: $v8 = COPY %y
@@ -991,22 +993,25 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %z:vr = COPY $noreg
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
- ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %phi:vr = PHI %x, %bb.1, %z, %bb.2
bb.0:
liveins: $x1
BEQ $x1, $x0, %bb.1
PseudoBR %bb.2
bb.1:
+ liveins: $x2
%x:vr = PseudoVLE32_V_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size)
- %vl:gprnox0 = COPY $x1
+ %vl:gprnox0 = COPY $x2
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0
$v8 = COPY %y
PseudoBR %bb.3
bb.2:
+ %z:vr = COPY $noreg
PseudoBR %bb.3
bb.3:
- %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 5 /* e32 */, 0
+ %phi:vr = PHI %x, %bb.1, %z, %bb.2
...
From 61a24ad4cf9a9d135f46ed8296fcf3379b2fa2d2 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Wed, 4 Mar 2026 11:39:51 +0800
Subject: [PATCH 5/5] Add const to references
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 1a81a6115c214..948247645bca6 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1272,13 +1272,13 @@ bool RISCVVLOptimizer::tryReduceVL(MachineInstr &MI,
return true;
}
MachineInstr *VLMI = MRI->getVRegDef(CommonVL.getReg());
- auto VLDominates = [this, &VLMI](MachineInstr &MI) {
+ auto VLDominates = [this, &VLMI](const MachineInstr &MI) {
return MDT->dominates(VLMI, &MI);
};
if (!VLDominates(MI)) {
assert(MI.getNumExplicitDefs() == 1);
auto Uses = MRI->use_instructions(MI.getOperand(0).getReg());
- auto UsesSameBB = make_filter_range(Uses, [&MI](MachineInstr &Use) {
+ auto UsesSameBB = make_filter_range(Uses, [&MI](const MachineInstr &Use) {
return Use.getParent() == MI.getParent();
});
if (VLMI->getParent() == MI.getParent() &&
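
Patch 5 is a pure const-correctness cleanup, but it buys a concrete compile-time property: a predicate whose parameter is a const reference can be applied to const instructions and const ranges, which a non-const reference parameter would reject. A small self-contained sketch of that property, using toy MBB/MI types rather than LLVM's, so every name here is illustrative only:

#include <algorithm>
#include <vector>

struct MBB {};

struct MI {
  MBB *Parent;
  MBB *getParent() const { return Parent; }
};

int main() {
  MBB BB;
  std::vector<MI> Uses = {{&BB}, {&BB}};
  const MI Def{&BB};

  // Mirrors the UsesSameBB predicate: the const reference parameter is
  // what lets the lambda accept elements of the const range below.
  auto SameBB = [&Def](const MI &Use) {
    return Use.getParent() == Def.getParent();
  };

  // With `MI &Use` above, this call would fail to compile; with
  // `const MI &Use` it is accepted.
  const std::vector<MI> &ConstUses = Uses;
  return std::all_of(ConstUses.begin(), ConstUses.end(), SameBB) ? 0 : 1;
}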