[llvm] [RISCV] Rematerialize vmv.v.x (PR #107993)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 10 02:45:51 PDT 2024
https://github.com/lukel97 created https://github.com/llvm/llvm-project/pull/107993
Even though vmv.v.x has a non-constant scalar operand, we can still rematerialize it because register allocation is split between vectors and scalars.
Program regalloc.NumSpills regalloc.NumReloads regalloc.NumReMaterialization
lhs rhs diff lhs rhs diff lhs rhs diff
657.xz_s 289.00 292.00 1.0% 505.00 484.00 -4.2% 613.00 612.00 -0.2%
557.xz_r 289.00 292.00 1.0% 505.00 484.00 -4.2% 613.00 612.00 -0.2%
505.mcf_r 141.00 141.00 0.0% 372.00 372.00 0.0% 123.00 123.00 0.0%
641.leela_s 356.00 356.00 0.0% 525.00 525.00 0.0% 801.00 801.00 0.0%
625.x264_s 1886.00 1886.00 0.0% 4561.00 4561.00 0.0% 2108.00 2108.00 0.0%
623.xalancbmk_s 1548.00 1548.00 0.0% 2466.00 2466.00 0.0% 13983.00 13983.00 0.0%
620.omnetpp_s 946.00 946.00 0.0% 1485.00 1485.00 0.0% 8413.00 8413.00 0.0%
605.mcf_s 141.00 141.00 0.0% 372.00 372.00 0.0% 123.00 123.00 0.0%
541.leela_r 356.00 356.00 0.0% 525.00 525.00 0.0% 801.00 801.00 0.0%
525.x264_r 1886.00 1886.00 0.0% 4561.00 4561.00 0.0% 2108.00 2108.00 0.0%
510.parest_r 42740.00 42740.00 0.0% 82400.00 82400.00 0.0% 65165.00 65165.00 0.0%
520.omnetpp_r 946.00 946.00 0.0% 1485.00 1485.00 0.0% 8413.00 8413.00 0.0%
508.namd_r 6598.00 6598.00 0.0% 15509.00 15509.00 0.0% 3164.00 3164.00 0.0%
644.nab_s 753.00 753.00 0.0% 1183.00 1183.00 0.0% 1559.00 1559.00 0.0%
619.lbm_s 68.00 68.00 0.0% 70.00 70.00 0.0% 20.00 20.00 0.0%
544.nab_r 753.00 753.00 0.0% 1183.00 1183.00 0.0% 1559.00 1559.00 0.0%
519.lbm_r 73.00 73.00 0.0% 75.00 75.00 0.0% 18.00 18.00 0.0%
511.povray_r 1937.00 1937.00 0.0% 3629.00 3629.00 0.0% 4914.00 4914.00 0.0%
523.xalancbmk_r 1548.00 1548.00 0.0% 2466.00 2466.00 0.0% 13983.00 13983.00 0.0%
502.gcc_r 12450.00 12446.00 -0.0% 27328.00 27312.00 -0.1% 50527.00 50533.00 0.0%
602.gcc_s 12450.00 12446.00 -0.0% 27328.00 27312.00 -0.1% 50527.00 50533.00 0.0%
500.perlbench_r 4178.00 4175.00 -0.1% 9162.00 9061.00 -1.1% 10223.00 10392.00 1.7%
600.perlbench_s 4178.00 4175.00 -0.1% 9162.00 9061.00 -1.1% 10223.00 10392.00 1.7%
526.blender_r 13105.00 13081.00 -0.2% 26478.00 26438.00 -0.2% 65188.00 65230.00 0.1%
638.imagick_s 4181.00 4157.00 -0.6% 11342.00 11316.00 -0.2% 10884.00 10938.00 0.5%
538.imagick_r 4181.00 4157.00 -0.6% 11342.00 11316.00 -0.2% 10884.00 10938.00 0.5%
531.deepsjeng_r 353.00 345.00 -2.3% 682.00 674.00 -1.2% 530.00 538.00 1.5%
631.deepsjeng_s 353.00 345.00 -2.3% 682.00 674.00 -1.2% 530.00 538.00 1.5%
Geomean difference -0.1% -0.5% 0.3%
The slight increase in spills in the xz benchmarks comes from scalar spills (presumably because the extra uses of the scalar operand affect spill weights); we still manage to remove some vector spills there too.
InlineSpiller checks that the scalar operand is live at the point where the rematerialization occurs, so this won't extend any scalar live ranges. However, this also means we may not be able to rematerialize in some cases, as shown in @vmv.v.x_needs_extended.
It might be worthwhile teaching InlineSpiller to extend scalar live ranges in a future patch. I experimented with this locally and it reduced spills on 531.deepsjeng_r by a further 3%.
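For reference, a rough C++ sketch of what the target hook ends up looking like with this change. Only the switch on RISCV::getRVVMCOpcode, the existing VMV_V_I/VID_V cases, the getOperand(1).isUndef() check and the new VMV_V_X case come from the diff below; the remaining guard conditions and the fallback to the base class are elided or paraphrased, so treat this as an approximation rather than the exact upstream code.

bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
  case RISCV::VMV_V_X: // Newly added: the scalar operand lives in a GPR, which
                       // is allocated separately from the vector registers, so
                       // it doesn't block rematerialization.
  case RISCV::VMV_V_I:
  case RISCV::VID_V:
    // Only rematerialize when the passthru operand is undef; the remaining
    // checks from the existing code are elided here.
    if (MI.getOperand(1).isUndef() /* && ... */)
      return true;
    break;
  default:
    break;
  }
  return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
}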
From 436e0d1032257c1445158ef7bd6096373f12d1e1 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 10 Sep 2024 16:11:56 +0800
Subject: [PATCH 1/2] Precommit test
---
llvm/test/CodeGen/RISCV/rvv/remat.ll | 94 ++++++++++++++++++++++++++++
1 file changed, 94 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index 2b12249378eb1f..2b12b88adba423 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -171,3 +171,97 @@ define void @vmv.v.i(ptr %p) {
store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p
ret void
}
+
+; The live range of %x needs to be extended down to the use of vmv.v.x at the
+; end of the block.
+define void @vmv.v.x_needs_extended(ptr %p, i64 %x) {
+; CHECK-LABEL: vmv.v.x_needs_extended:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vs8r.v v0, (a0)
+; CHECK-NEXT: vs8r.v v24, (a0)
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
+ store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
+
+ %a = load volatile <vscale x 8 x i64>, ptr %p
+ %b = load volatile <vscale x 8 x i64>, ptr %p
+ %c = load volatile <vscale x 8 x i64>, ptr %p
+ %d = load volatile <vscale x 8 x i64>, ptr %p
+ store volatile <vscale x 8 x i64> %d, ptr %p
+ store volatile <vscale x 8 x i64> %c, ptr %p
+ store volatile <vscale x 8 x i64> %b, ptr %p
+ store volatile <vscale x 8 x i64> %a, ptr %p
+
+ store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
+ ret void
+}
+
+define void @vmv.v.x_live(ptr %p, i64 %x) {
+; CHECK-LABEL: vmv.v.x_live:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vs8r.v v0, (a0)
+; CHECK-NEXT: vs8r.v v24, (a0)
+; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
+ store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
+
+ %a = load volatile <vscale x 8 x i64>, ptr %p
+ %b = load volatile <vscale x 8 x i64>, ptr %p
+ %c = load volatile <vscale x 8 x i64>, ptr %p
+ %d = load volatile <vscale x 8 x i64>, ptr %p
+ store volatile <vscale x 8 x i64> %d, ptr %p
+ store volatile <vscale x 8 x i64> %c, ptr %p
+ store volatile <vscale x 8 x i64> %b, ptr %p
+ store volatile <vscale x 8 x i64> %a, ptr %p
+
+ store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
+ store volatile i64 %x, ptr %p
+ ret void
+}
From 2d7701d6f11b31f9660f43a889470a75f21f350b Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 10 Sep 2024 17:25:32 +0800
Subject: [PATCH 2/2] [RISCV] Rematerialize vmv.v.x
Even though vmv.v.x has a non-constant scalar operand, we can still rematerialize it because register allocation is split between vectors and scalars on RISC-V.
Program regalloc.NumSpills regalloc.NumReloads regalloc.NumReMaterialization
lhs rhs diff lhs rhs diff lhs rhs diff
657.xz_s 289.00 292.00 1.0% 505.00 484.00 -4.2% 613.00 612.00 -0.2%
557.xz_r 289.00 292.00 1.0% 505.00 484.00 -4.2% 613.00 612.00 -0.2%
505.mcf_r 141.00 141.00 0.0% 372.00 372.00 0.0% 123.00 123.00 0.0%
641.leela_s 356.00 356.00 0.0% 525.00 525.00 0.0% 801.00 801.00 0.0%
625.x264_s 1886.00 1886.00 0.0% 4561.00 4561.00 0.0% 2108.00 2108.00 0.0%
623.xalancbmk_s 1548.00 1548.00 0.0% 2466.00 2466.00 0.0% 13983.00 13983.00 0.0%
620.omnetpp_s 946.00 946.00 0.0% 1485.00 1485.00 0.0% 8413.00 8413.00 0.0%
605.mcf_s 141.00 141.00 0.0% 372.00 372.00 0.0% 123.00 123.00 0.0%
541.leela_r 356.00 356.00 0.0% 525.00 525.00 0.0% 801.00 801.00 0.0%
525.x264_r 1886.00 1886.00 0.0% 4561.00 4561.00 0.0% 2108.00 2108.00 0.0%
510.parest_r 42740.00 42740.00 0.0% 82400.00 82400.00 0.0% 65165.00 65165.00 0.0%
520.omnetpp_r 946.00 946.00 0.0% 1485.00 1485.00 0.0% 8413.00 8413.00 0.0%
508.namd_r 6598.00 6598.00 0.0% 15509.00 15509.00 0.0% 3164.00 3164.00 0.0%
644.nab_s 753.00 753.00 0.0% 1183.00 1183.00 0.0% 1559.00 1559.00 0.0%
619.lbm_s 68.00 68.00 0.0% 70.00 70.00 0.0% 20.00 20.00 0.0%
544.nab_r 753.00 753.00 0.0% 1183.00 1183.00 0.0% 1559.00 1559.00 0.0%
519.lbm_r 73.00 73.00 0.0% 75.00 75.00 0.0% 18.00 18.00 0.0%
511.povray_r 1937.00 1937.00 0.0% 3629.00 3629.00 0.0% 4914.00 4914.00 0.0%
523.xalancbmk_r 1548.00 1548.00 0.0% 2466.00 2466.00 0.0% 13983.00 13983.00 0.0%
502.gcc_r 12450.00 12446.00 -0.0% 27328.00 27312.00 -0.1% 50527.00 50533.00 0.0%
602.gcc_s 12450.00 12446.00 -0.0% 27328.00 27312.00 -0.1% 50527.00 50533.00 0.0%
500.perlbench_r 4178.00 4175.00 -0.1% 9162.00 9061.00 -1.1% 10223.00 10392.00 1.7%
600.perlbench_s 4178.00 4175.00 -0.1% 9162.00 9061.00 -1.1% 10223.00 10392.00 1.7%
526.blender_r 13105.00 13081.00 -0.2% 26478.00 26438.00 -0.2% 65188.00 65230.00 0.1%
638.imagick_s 4181.00 4157.00 -0.6% 11342.00 11316.00 -0.2% 10884.00 10938.00 0.5%
538.imagick_r 4181.00 4157.00 -0.6% 11342.00 11316.00 -0.2% 10884.00 10938.00 0.5%
531.deepsjeng_r 353.00 345.00 -2.3% 682.00 674.00 -1.2% 530.00 538.00 1.5%
631.deepsjeng_s 353.00 345.00 -2.3% 682.00 674.00 -1.2% 530.00 538.00 1.5%
Geomean difference -0.1% -0.5% 0.3%
The slight increase in spills in the xz benchmarks comes from scalar spills (presumably because the extra uses of the scalar operand affect spill weights); we still manage to remove some vector spills there too.
InlineSpiller checks that the scalar operand is live at the point where the rematerialization occurs, so this won't extend any scalar live ranges. However, this also means we may not be able to rematerialize in some cases, as shown in @vmv.v.x_needs_extended.
It might be worthwhile teaching InlineSpiller to extend scalar live ranges in a future patch. I experimented with this locally and it reduced spills on 531.deepsjeng_r by a further 3%.
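On the TableGen side the change is just a flag on the pseudo. A sketch of how it sits in the existing multiclass (reconstructed from the hunk below; the rest of the multiclass body, including the vector- and immediate-source definitions, is elided):

multiclass VPseudoUnaryVMV_V_X_I {
  ...
  // A `let ... in` without braces applies only to the next statement, so only
  // the GPR-source vmv.v.x pseudo is marked rematerializable here; the other
  // pseudos in this multiclass are left as they were.
  let isReMaterializable = 1 in
  def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
                   SchedUnary<"WriteVIMovX", "ReadVIMovX", mx,
                              forcePassthruRead=true>;
  ...
}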
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 1 +
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 1 +
llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll | 157 ++++++--------
llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll | 196 ++++++++----------
llvm/test/CodeGen/RISCV/rvv/remat.ll | 161 +++++++++-----
5 files changed, 263 insertions(+), 253 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 325a50c9f48a1c..2bb9df4ead0e9c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -169,6 +169,7 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
const MachineInstr &MI) const {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
+ case RISCV::VMV_V_X:
case RISCV::VMV_V_I:
case RISCV::VID_V:
if (MI.getOperand(1).isUndef() &&
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e11f176bfe6041..c6cecb7d07182f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2475,6 +2475,7 @@ multiclass VPseudoUnaryVMV_V_X_I {
def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
SchedUnary<"WriteVIMovV", "ReadVIMovV", mx,
forcePassthruRead=true>;
+ let isReMaterializable = 1 in
def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
SchedUnary<"WriteVIMovX", "ReadVIMovX", mx,
forcePassthruRead=true>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 01aac122d5957d..7031f93edc2c3e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2022,14 +2022,9 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
-; RV32-NEXT: vmv1r.v v24, v0
+; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 48
+; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -2045,7 +2040,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
@@ -2053,67 +2048,53 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: lui a3, 349525
; RV32-NEXT: addi a3, a3, 1365
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a3
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vmv.v.x v8, a3
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: lui a3, 209715
; RV32-NEXT: addi a3, a3, 819
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a3
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 5
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmv.v.x v8, a3
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a3, 61681
@@ -2121,25 +2102,26 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a3, 4112
; RV32-NEXT: addi a3, a3, 257
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v16, v16, v8, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: li a2, 56
-; RV32-NEXT: vsrl.vx v8, v16, a2, v0.t
+; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
@@ -2149,8 +2131,8 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
-; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: li a3, 48
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: li a3, 40
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -2160,71 +2142,64 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 48
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 48
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 48
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv8r.v v16, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -2386,23 +2361,23 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v24, v24, v0
-; RV32-NEXT: vsub.vv v24, v16, v24
+; RV32-NEXT: vsub.vv v16, v16, v24
; RV32-NEXT: lui a3, 209715
; RV32-NEXT: addi a3, a3, 819
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v0, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v24, v0
-; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v16, v0
+; RV32-NEXT: vsrl.vi v16, v16, 2
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v24, v24, v0
-; RV32-NEXT: vadd.vv v24, v16, v24
-; RV32-NEXT: vsrl.vi v16, v24, 4
+; RV32-NEXT: vand.vv v16, v16, v0
; RV32-NEXT: vadd.vv v16, v24, v16
+; RV32-NEXT: vsrl.vi v24, v16, 4
+; RV32-NEXT: vadd.vv v16, v16, v24
; RV32-NEXT: lui a3, 61681
; RV32-NEXT: addi a3, a3, -241
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
@@ -2437,16 +2412,16 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v24, v24, v0
-; RV32-NEXT: vsub.vv v24, v8, v24
+; RV32-NEXT: vsub.vv v8, v8, v24
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v24, v0
-; RV32-NEXT: vsrl.vi v24, v24, 2
-; RV32-NEXT: vand.vv v24, v24, v0
-; RV32-NEXT: vadd.vv v8, v8, v24
+; RV32-NEXT: vand.vv v24, v8, v0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vadd.vv v8, v24, v8
; RV32-NEXT: vsrl.vi v24, v8, 4
; RV32-NEXT: vadd.vv v8, v8, v24
; RV32-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 0ef0a431dabc43..d36240e493e41d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2266,7 +2266,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vnot.v v16, v16, v0.t
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 40
+; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
@@ -2283,12 +2283,18 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v8, a4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 24
+; RV32-NEXT: li a5, 40
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 48
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
@@ -2297,55 +2303,51 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 40
+; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
+; RV32-NEXT: lui a4, 209715
+; RV32-NEXT: addi a4, a4, 819
+; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 40
+; RV32-NEXT: li a5, 48
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 209715
-; RV32-NEXT: addi a4, a4, 819
-; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 40
+; RV32-NEXT: li a5, 48
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a4, a4, 4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 40
+; RV32-NEXT: li a5, 48
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 48
+; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a4, a4, 4
-; RV32-NEXT: add a4, sp, a4
-; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a4, 61681
@@ -2353,26 +2355,30 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v16, a4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a4, a4, 4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a4, 4112
; RV32-NEXT: addi a4, a4, 257
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
+; RV32-NEXT: vmv.v.x v16, a4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v16, v16, v8, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: li a3, 56
-; RV32-NEXT: vsrl.vx v8, v16, a3, v0.t
-; RV32-NEXT: addi a4, sp, 16
+; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: bltu a0, a1, .LBB46_2
; RV32-NEXT: # %bb.1:
@@ -2382,40 +2388,32 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v16, v8, a2, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vsub.vx v8, v16, a2, v0.t
+; RV32-NEXT: vnot.v v16, v16, v0.t
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
@@ -2442,6 +2440,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vmv8r.v v16, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
@@ -2449,12 +2448,6 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 48
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
@@ -2465,19 +2458,23 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t
-; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 56
@@ -2608,15 +2605,9 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 40
-; RV32-NEXT: mul a1, a1, a2
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 5
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: sub a2, a0, a1
; RV32-NEXT: sltu a3, a0, a2
@@ -2624,22 +2615,22 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: and a3, a3, a2
; RV32-NEXT: li a2, 1
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v8, v16, a2
+; RV32-NEXT: vsub.vx v24, v16, a2
; RV32-NEXT: vnot.v v16, v16
-; RV32-NEXT: vand.vv v16, v16, v8
+; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: vsrl.vi v24, v16, 1
; RV32-NEXT: lui a4, 349525
; RV32-NEXT: addi a4, a4, 1365
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
+; RV32-NEXT: vmv.v.x v0, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v0, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vand.vv v24, v24, v0
; RV32-NEXT: vsub.vv v16, v16, v24
; RV32-NEXT: lui a4, 209715
; RV32-NEXT: addi a4, a4, 819
@@ -2648,6 +2639,11 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vand.vv v24, v16, v0
; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 4
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v0, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vand.vv v16, v16, v0
; RV32-NEXT: vadd.vv v16, v24, v16
; RV32-NEXT: vsrl.vi v24, v16, 4
@@ -2655,50 +2651,46 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: lui a4, 61681
; RV32-NEXT: addi a4, a4, -241
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
+; RV32-NEXT: vmv.v.x v24, a4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a4, a4, 4
+; RV32-NEXT: slli a4, a4, 3
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: lui a4, 4112
; RV32-NEXT: addi a4, a4, 257
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a4
-; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a4, a4, 3
-; RV32-NEXT: add a4, sp, a4
-; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vmv.v.x v24, a4
+; RV32-NEXT: addi a4, sp, 16
+; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v16, v16, v24
; RV32-NEXT: li a3, 56
-; RV32-NEXT: vsrl.vx v8, v8, a3
-; RV32-NEXT: addi a4, sp, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsrl.vx v16, v16, a3
; RV32-NEXT: bltu a0, a1, .LBB47_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB47_2:
-; RV32-NEXT: slli a1, a1, 5
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v8, v24, a2
-; RV32-NEXT: vnot.v v24, v24
-; RV32-NEXT: vand.vv v8, v24, v8
+; RV32-NEXT: vsub.vx v24, v8, a2
+; RV32-NEXT: vnot.v v8, v8
+; RV32-NEXT: vand.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 1
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v24, v0
; RV32-NEXT: vsub.vv v8, v8, v24
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v24, v8, v0
; RV32-NEXT: vsrl.vi v8, v8, 2
; RV32-NEXT: vand.vv v8, v8, v0
@@ -2706,23 +2698,17 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: vsrl.vi v24, v8, 4
; RV32-NEXT: vadd.vv v8, v8, v24
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v8, v8, a3
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24
; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v8, v24
+; RV32-NEXT: vsrl.vx v8, v8, a3
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index 2b12b88adba423..514612cd0525d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -175,34 +175,63 @@ define void @vmv.v.i(ptr %p) {
; The live range of %x needs to be extended down to the use of vmv.v.x at the
; end of the block.
define void @vmv.v.x_needs_extended(ptr %p, i64 %x) {
-; CHECK-LABEL: vmv.v.x_needs_extended:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: vs8r.v v8, (a0)
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vl8re64.v v0, (a0)
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vs8r.v v0, (a0)
-; CHECK-NEXT: vs8r.v v24, (a0)
-; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vs8r.v v8, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+; POSTRA-LABEL: vmv.v.x_needs_extended:
+; POSTRA: # %bb.0:
+; POSTRA-NEXT: addi sp, sp, -16
+; POSTRA-NEXT: .cfi_def_cfa_offset 16
+; POSTRA-NEXT: csrr a2, vlenb
+; POSTRA-NEXT: slli a2, a2, 3
+; POSTRA-NEXT: sub sp, sp, a2
+; POSTRA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; POSTRA-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; POSTRA-NEXT: vmv.v.x v8, a1
+; POSTRA-NEXT: addi a1, sp, 16
+; POSTRA-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vl8re64.v v16, (a0)
+; POSTRA-NEXT: vl8re64.v v24, (a0)
+; POSTRA-NEXT: vl8re64.v v0, (a0)
+; POSTRA-NEXT: vl8re64.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v0, (a0)
+; POSTRA-NEXT: vs8r.v v24, (a0)
+; POSTRA-NEXT: vs8r.v v16, (a0)
+; POSTRA-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: csrr a0, vlenb
+; POSTRA-NEXT: slli a0, a0, 3
+; POSTRA-NEXT: add sp, sp, a0
+; POSTRA-NEXT: addi sp, sp, 16
+; POSTRA-NEXT: ret
+;
+; PRERA-LABEL: vmv.v.x_needs_extended:
+; PRERA: # %bb.0:
+; PRERA-NEXT: addi sp, sp, -16
+; PRERA-NEXT: .cfi_def_cfa_offset 16
+; PRERA-NEXT: csrr a2, vlenb
+; PRERA-NEXT: slli a2, a2, 3
+; PRERA-NEXT: sub sp, sp, a2
+; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; PRERA-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; PRERA-NEXT: vmv.v.x v8, a1
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: addi a1, sp, 16
+; PRERA-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; PRERA-NEXT: vl8re64.v v24, (a0)
+; PRERA-NEXT: vl8re64.v v0, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v0, (a0)
+; PRERA-NEXT: vs8r.v v24, (a0)
+; PRERA-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: csrr a0, vlenb
+; PRERA-NEXT: slli a0, a0, 3
+; PRERA-NEXT: add sp, sp, a0
+; PRERA-NEXT: addi sp, sp, 16
+; PRERA-NEXT: ret
%vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
@@ -220,35 +249,53 @@ define void @vmv.v.x_needs_extended(ptr %p, i64 %x) {
}
define void @vmv.v.x_live(ptr %p, i64 %x) {
-; CHECK-LABEL: vmv.v.x_live:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub sp, sp, a2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: vs8r.v v8, (a0)
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vl8re64.v v0, (a0)
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vs8r.v v0, (a0)
-; CHECK-NEXT: vs8r.v v24, (a0)
-; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vs8r.v v8, (a0)
-; CHECK-NEXT: sd a1, 0(a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+; POSTRA-LABEL: vmv.v.x_live:
+; POSTRA: # %bb.0:
+; POSTRA-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; POSTRA-NEXT: vmv.v.x v8, a1
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vl8re64.v v16, (a0)
+; POSTRA-NEXT: vl8re64.v v24, (a0)
+; POSTRA-NEXT: vl8re64.v v0, (a0)
+; POSTRA-NEXT: vl8re64.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v0, (a0)
+; POSTRA-NEXT: vs8r.v v24, (a0)
+; POSTRA-NEXT: vs8r.v v16, (a0)
+; POSTRA-NEXT: vmv.v.x v8, a1
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: sd a1, 0(a0)
+; POSTRA-NEXT: ret
+;
+; PRERA-LABEL: vmv.v.x_live:
+; PRERA: # %bb.0:
+; PRERA-NEXT: addi sp, sp, -16
+; PRERA-NEXT: .cfi_def_cfa_offset 16
+; PRERA-NEXT: csrr a2, vlenb
+; PRERA-NEXT: slli a2, a2, 3
+; PRERA-NEXT: sub sp, sp, a2
+; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; PRERA-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; PRERA-NEXT: vmv.v.x v8, a1
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: addi a2, sp, 16
+; PRERA-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; PRERA-NEXT: vl8re64.v v24, (a0)
+; PRERA-NEXT: vl8re64.v v0, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v0, (a0)
+; PRERA-NEXT: vs8r.v v24, (a0)
+; PRERA-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: sd a1, 0(a0)
+; PRERA-NEXT: csrr a0, vlenb
+; PRERA-NEXT: slli a0, a0, 3
+; PRERA-NEXT: add sp, sp, a0
+; PRERA-NEXT: addi sp, sp, 16
+; PRERA-NEXT: ret
%vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p