[llvm] [RISCV] Refine location size for segment spill and fill (PR #133268)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 27 08:39:00 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-risc-v
Author: Philip Reames (preames)
<details>
<summary>Changes</summary>
This is a follow-up to #133171. I realized we could assume the structure of the previous MMO, and thus the split is much simpler than I'd initially pictured.
---
Patch is 47.99 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/133268.diff
6 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp (+8-2)
- (modified) llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir (+14-14)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index df7e0c8f022c8..b790b3fd7cebe 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -408,8 +408,11 @@ void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
auto *OldMMO = *(II->memoperands_begin());
+ LocationSize OldLoc = OldMMO->getSize();
+ assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
+ TypeSize NewSize = OldLoc.getValue().divideCoefficientBy(NF);
auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
- LocationSize::beforeOrAfterPointer());
+ LocationSize::precise(NewSize));
for (unsigned I = 0; I < NF; ++I) {
// Adding implicit-use of super register to describe we are using part of
// super register, that prevents machine verifier complaining when part of
@@ -488,8 +491,11 @@ void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
bool IsBaseKill = II->getOperand(1).isKill();
Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
auto *OldMMO = *(II->memoperands_begin());
+ LocationSize OldLoc = OldMMO->getSize();
+ assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
+ TypeSize NewSize = OldLoc.getValue().divideCoefficientBy(NF);
auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
- LocationSize::beforeOrAfterPointer());
+ LocationSize::precise(NewSize));
for (unsigned I = 0; I < NF; ++I) {
BuildMI(MBB, II, DL, TII->get(Opcode),
TRI->getSubReg(DestReg, SubRegIdx + I))
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 4c71e6c4a4627..0afdcdccd9246 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -42,13 +42,13 @@ define void @_Z3foov() {
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs2r.v v14, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v14, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40)
; CHECK-NEXT: #APP
@@ -61,13 +61,13 @@ define void @_Z3foov() {
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v12, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v12, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v14, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v14, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 7cb1fbf9a2344..878b180e34c01 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -34,9 +34,9 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 2
-; CHECK-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs4r.v v16, (a0) # vscale x 32-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs4r.v v20, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs4r.v v20, (a0) # vscale x 32-byte Folded Spill
; CHECK-NEXT: li s0, 36
; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: vfwadd.vv v16, v8, v12, v0.t
@@ -49,9 +49,9 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 2
-; CHECK-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl4r.v v16, (a0) # vscale x 32-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl4r.v v20, (a0) # vscale x 32-byte Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
@@ -94,9 +94,9 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: addi a0, a0, 16
; SUBREGLIVENESS-NEXT: csrr a1, vlenb
; SUBREGLIVENESS-NEXT: slli a1, a1, 2
-; SUBREGLIVENESS-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT: vs4r.v v16, (a0) # vscale x 32-byte Folded Spill
; SUBREGLIVENESS-NEXT: add a0, a0, a1
-; SUBREGLIVENESS-NEXT: vs4r.v v20, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT: vs4r.v v20, (a0) # vscale x 32-byte Folded Spill
; SUBREGLIVENESS-NEXT: li s0, 36
; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vfwadd.vv v16, v8, v12, v0.t
@@ -109,9 +109,9 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: addi a0, a0, 16
; SUBREGLIVENESS-NEXT: csrr a1, vlenb
; SUBREGLIVENESS-NEXT: slli a1, a1, 2
-; SUBREGLIVENESS-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: vl4r.v v16, (a0) # vscale x 32-byte Folded Reload
; SUBREGLIVENESS-NEXT: add a0, a0, a1
-; SUBREGLIVENESS-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: vl4r.v v20, (a0) # vscale x 32-byte Folded Reload
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
index fdfd4ad63fde6..663bb1fc15517 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
@@ -42,15 +42,15 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
-; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
@@ -65,16 +65,16 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -109,15 +109,15 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -162,15 +162,15 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
-; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
@@ -185,16 +185,16 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -229,15 +229,15 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -285,17 +285,17 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
@@ -310,16 +310,16 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 64
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -355,17 +355,17 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -413,17 +413,17 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
-; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
-; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
@@ -438,16 +438,16 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
-; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
-; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 128
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -483,17 +483,17 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 2
-; SPILL-O2-VSETVLI-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs4r.v v12, (a0) #...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/133268
More information about the llvm-commits
mailing list