[llvm] [RISCV] Refine location size for segment spill and fill (PR #133268)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 27 11:12:42 PDT 2025
https://github.com/preames updated https://github.com/llvm/llvm-project/pull/133268
From 73a77e6354a4ef9c519bf15d9ee7d78de7fc974b Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 27 Mar 2025 07:51:47 -0700
Subject: [PATCH 1/2] [RISCV] Refine location size for segment spill and fill
This is a follow-up to #133171. I realized we could assume the
structure of the previous MMO, and thus the split is much simpler
than I'd initially pictured.
---
llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp | 10 +-
.../early-clobber-tied-def-subreg-liveness.ll | 16 +--
...regalloc-last-chance-recoloring-failure.ll | 16 +--
.../CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll | 132 +++++++++---------
.../CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll | 132 +++++++++---------
llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir | 28 ++--
6 files changed, 170 insertions(+), 164 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index df7e0c8f022c8..b790b3fd7cebe 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -408,8 +408,11 @@ void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
auto *OldMMO = *(II->memoperands_begin());
+ LocationSize OldLoc = OldMMO->getSize();
+ assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
+ TypeSize NewSize = OldLoc.getValue().divideCoefficientBy(NF);
auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
- LocationSize::beforeOrAfterPointer());
+ LocationSize::precise(NewSize));
for (unsigned I = 0; I < NF; ++I) {
// Adding implicit-use of super register to describe we are using part of
// super register, that prevents machine verifier complaining when part of
@@ -488,8 +491,11 @@ void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
bool IsBaseKill = II->getOperand(1).isKill();
Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
auto *OldMMO = *(II->memoperands_begin());
+ LocationSize OldLoc = OldMMO->getSize();
+ assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
+ TypeSize NewSize = OldLoc.getValue().divideCoefficientBy(NF);
auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
- LocationSize::beforeOrAfterPointer());
+ LocationSize::precise(NewSize));
for (unsigned I = 0; I < NF; ++I) {
BuildMI(MBB, II, DL, TII->get(Opcode),
TRI->getSubReg(DestReg, SubRegIdx + I))
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 4c71e6c4a4627..0afdcdccd9246 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -42,13 +42,13 @@ define void @_Z3foov() {
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs2r.v v14, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs2r.v v14, (a0) # vscale x 16-byte Folded Spill
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40)
; CHECK-NEXT: #APP
@@ -61,13 +61,13 @@ define void @_Z3foov() {
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v12, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v12, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v14, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl2r.v v14, (a0) # vscale x 16-byte Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 7cb1fbf9a2344..878b180e34c01 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -34,9 +34,9 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 2
-; CHECK-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs4r.v v16, (a0) # vscale x 32-byte Folded Spill
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vs4r.v v20, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs4r.v v20, (a0) # vscale x 32-byte Folded Spill
; CHECK-NEXT: li s0, 36
; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: vfwadd.vv v16, v8, v12, v0.t
@@ -49,9 +49,9 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 2
-; CHECK-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl4r.v v16, (a0) # vscale x 32-byte Folded Reload
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vl4r.v v20, (a0) # vscale x 32-byte Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
@@ -94,9 +94,9 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: addi a0, a0, 16
; SUBREGLIVENESS-NEXT: csrr a1, vlenb
; SUBREGLIVENESS-NEXT: slli a1, a1, 2
-; SUBREGLIVENESS-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT: vs4r.v v16, (a0) # vscale x 32-byte Folded Spill
; SUBREGLIVENESS-NEXT: add a0, a0, a1
-; SUBREGLIVENESS-NEXT: vs4r.v v20, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT: vs4r.v v20, (a0) # vscale x 32-byte Folded Spill
; SUBREGLIVENESS-NEXT: li s0, 36
; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vfwadd.vv v16, v8, v12, v0.t
@@ -109,9 +109,9 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: addi a0, a0, 16
; SUBREGLIVENESS-NEXT: csrr a1, vlenb
; SUBREGLIVENESS-NEXT: slli a1, a1, 2
-; SUBREGLIVENESS-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: vl4r.v v16, (a0) # vscale x 32-byte Folded Reload
; SUBREGLIVENESS-NEXT: add a0, a0, a1
-; SUBREGLIVENESS-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: vl4r.v v20, (a0) # vscale x 32-byte Folded Reload
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
index fdfd4ad63fde6..663bb1fc15517 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
@@ -42,15 +42,15 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
-; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
@@ -65,16 +65,16 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -109,15 +109,15 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -162,15 +162,15 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
-; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
@@ -185,16 +185,16 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -229,15 +229,15 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -285,17 +285,17 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
@@ -310,16 +310,16 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 64
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -355,17 +355,17 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -413,17 +413,17 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
-; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
-; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
@@ -438,16 +438,16 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
-; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
-; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 128
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -483,17 +483,17 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 2
-; SPILL-O2-VSETVLI-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 2
-; SPILL-O2-VSETVLI-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -542,21 +542,21 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
@@ -572,20 +572,20 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 96
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -623,21 +623,21 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i32 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; SPILL-O2-VSETVLI-NEXT: li a1, 6
; SPILL-O2-VSETVLI-NEXT: mul a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
index fa433e2de6ed7..dc0e8fd987c6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
@@ -42,15 +42,15 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
-; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
@@ -65,16 +65,16 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -109,15 +109,15 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -162,15 +162,15 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
-; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
-; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
@@ -185,16 +185,16 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
-; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -229,15 +229,15 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v7, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -285,17 +285,17 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
@@ -310,16 +310,16 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 64
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -355,17 +355,17 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -413,17 +413,17 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
-; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
-; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
@@ -438,16 +438,16 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
-; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
-; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 128
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -483,17 +483,17 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 2
-; SPILL-O2-VSETVLI-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs4r.v v12, (a0) # vscale x 32-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 2
-; SPILL-O2-VSETVLI-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl4r.v v4, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; SPILL-O2-VSETVLI-NEXT: add sp, sp, a0
; SPILL-O2-VSETVLI-NEXT: addi sp, sp, 16
@@ -542,21 +542,21 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
-; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
-; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
@@ -572,20 +572,20 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VLEN128-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VLEN128-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
-; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
-; SPILL-O2-VLEN128-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 96
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
@@ -623,21 +623,21 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v10, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; SPILL-O2-VSETVLI-NEXT: vs2r.v v12, (a0) # vscale x 16-byte Folded Spill
; SPILL-O2-VSETVLI-NEXT: #APP
; SPILL-O2-VSETVLI-NEXT: #NO_APP
; SPILL-O2-VSETVLI-NEXT: addi a0, sp, 16
; SPILL-O2-VSETVLI-NEXT: csrr a1, vlenb
; SPILL-O2-VSETVLI-NEXT: slli a1, a1, 1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v6, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: add a0, a0, a1
-; SPILL-O2-VSETVLI-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VSETVLI-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload
; SPILL-O2-VSETVLI-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; SPILL-O2-VSETVLI-NEXT: li a1, 6
; SPILL-O2-VSETVLI-NEXT: mul a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index 715765c3006f7..055d9ed630718 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -31,34 +31,34 @@ body: |
; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, renamable $x10, $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $x11 = ADDI $x2, 16
; CHECK-NEXT: $x12 = PseudoReadVLENB
- ; CHECK-NEXT: VS1R_V $v0, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+ ; CHECK-NEXT: VS1R_V $v0, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: VS1R_V $v1, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+ ; CHECK-NEXT: VS1R_V $v1, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: VS1R_V $v2, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+ ; CHECK-NEXT: VS1R_V $v2, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: VS1R_V $v3, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+ ; CHECK-NEXT: VS1R_V $v3, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: VS1R_V $v4, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+ ; CHECK-NEXT: VS1R_V $v4, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: VS1R_V $v5, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+ ; CHECK-NEXT: VS1R_V $v5, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
- ; CHECK-NEXT: VS1R_V $v6, killed $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+ ; CHECK-NEXT: VS1R_V $v6, killed $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADDI $x2, 16
; CHECK-NEXT: $x12 = PseudoReadVLENB
- ; CHECK-NEXT: $v7 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+ ; CHECK-NEXT: $v7 = VL1RE8_V $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: $v8 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+ ; CHECK-NEXT: $v8 = VL1RE8_V $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: $v9 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+ ; CHECK-NEXT: $v9 = VL1RE8_V $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: $v10 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+ ; CHECK-NEXT: $v10 = VL1RE8_V $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: $v11 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+ ; CHECK-NEXT: $v11 = VL1RE8_V $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
- ; CHECK-NEXT: $v12 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+ ; CHECK-NEXT: $v12 = VL1RE8_V $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
- ; CHECK-NEXT: $v13 = VL1RE8_V killed $x11 :: (load unknown-size from %stack.0, align 8)
+ ; CHECK-NEXT: $v13 = VL1RE8_V killed $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: VS1R_V killed $v8, killed renamable $x10
; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3
>From 903a2b951607134067dce828852b18192b4c39f0 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 27 Mar 2025 11:06:56 -0700
Subject: [PATCH 2/2] Adjust to updated API
---
llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index b790b3fd7cebe..b3340b97f4b09 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -411,8 +411,7 @@ void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
LocationSize OldLoc = OldMMO->getSize();
assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
TypeSize NewSize = OldLoc.getValue().divideCoefficientBy(NF);
- auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
- LocationSize::precise(NewSize));
+ auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(), NewSize);
for (unsigned I = 0; I < NF; ++I) {
// Adding implicit-use of super register to describe we are using part of
// super register, that prevents machine verifier complaining when part of
@@ -494,8 +493,7 @@ void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
LocationSize OldLoc = OldMMO->getSize();
assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
TypeSize NewSize = OldLoc.getValue().divideCoefficientBy(NF);
- auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
- LocationSize::precise(NewSize));
+ auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(), NewSize);
for (unsigned I = 0; I < NF; ++I) {
BuildMI(MBB, II, DL, TII->get(Opcode),
TRI->getSubReg(DestReg, SubRegIdx + I))
More information about the llvm-commits
mailing list