[llvm] [RISCV] Use a precise size for MMO on scalable spill and fill (PR #133171)

via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 26 15:23:56 PDT 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-risc-v

Author: Philip Reames (preames)

<details>
<summary>Changes</summary>

The primary effect of this is that we get proper scalable sizes printed by the assembler, but this may also enable proper aliasing analysis. I don't see any test changes resulting from the latter.

Getting the size is slightly tricky as we store the scalable size as a non-scalable quantity in the object size field for the frame index.  We really should remove that hack at some point...

For the synthetic tuple spills and fills, I dropped the size from the split loads and stores to avoid incorrect (overly large) sizes. We could also divide by the NF factor if we felt like writing the code to do so.

---

Patch is 1.01 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/133171.diff


114 Files Affected:

- (modified) llvm/lib/CodeGen/MachineInstr.cpp (+8-3) 
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.cpp (+6-2) 
- (modified) llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp (+9-2) 
- (modified) llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/pr69586.ll (+28-28) 
- (modified) llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll (+38-38) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll (+38-38) 
- (modified) llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/expandload.ll (+78-78) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll (+38-38) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll (+38-38) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll (+32-32) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll (+32-32) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll (+110-110) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll (+110-110) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll (+60-60) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll (+32-32) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-addsub.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll (+62-62) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp-interleave.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int-interleave.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll (+60-60) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll (+14-14) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll (+14-14) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll (+68-68) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll (+68-68) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll (+134-134) 
- (modified) llvm/test/CodeGen/RISCV/rvv/frm-insert.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll (+38-38) 
- (modified) llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/pr63596.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/remat.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll (+40-40) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll (+28-28) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll (+40-40) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-cfi-info.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll (+32-32) 
- (modified) llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/stack-folding.ll (+13-13) 
- (modified) llvm/test/CodeGen/RISCV/rvv/stack-slot-coloring.mir (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/stores-of-loads-merging.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll (+530-530) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll (+44-44) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll (+72-72) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll (+22-22) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll (+4-4) 


``````````diff
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 471666568e79a..2409e601ceb46 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -2525,7 +2525,7 @@ using MMOList = SmallVector<const MachineMemOperand *, 2>;
 
 static LocationSize getSpillSlotSize(const MMOList &Accesses,
                                      const MachineFrameInfo &MFI) {
-  uint64_t Size = 0;
+  std::optional<TypeSize> Size;
   for (const auto *A : Accesses) {
     if (MFI.isSpillSlotObjectIndex(
             cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
@@ -2533,10 +2533,15 @@ static LocationSize getSpillSlotSize(const MMOList &Accesses,
       LocationSize S = A->getSize();
       if (!S.hasValue())
         return LocationSize::beforeOrAfterPointer();
-      Size += S.getValue();
+      if (!Size)
+        Size = S.getValue();
+      else
+        Size = *Size + S.getValue();
     }
   }
-  return Size;
+  if (!Size)
+    return LocationSize::precise(0);
+  return LocationSize::precise(*Size);
 }
 
 std::optional<LocationSize>
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 4fb11f278df97..d6dfa4cabe35c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -705,9 +705,11 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
     llvm_unreachable("Can't store this register to stack slot");
 
   if (IsScalableVector) {
+    LocationSize LocSize =
+        LocationSize::precise(TypeSize::getScalable(MFI.getObjectSize(FI)));
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
-        LocationSize::beforeOrAfterPointer(), MFI.getObjectAlign(FI));
+        LocSize, MFI.getObjectAlign(FI));
 
     MFI.setStackID(FI, TargetStackID::ScalableVector);
     BuildMI(MBB, I, DebugLoc(), get(Opcode))
@@ -797,9 +799,11 @@ void RISCVInstrInfo::loadRegFromStackSlot(
     llvm_unreachable("Can't load this register from stack slot");
 
   if (IsScalableVector) {
+    LocationSize LocSize =
+        LocationSize::precise(TypeSize::getScalable(MFI.getObjectSize(FI)));
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
-        LocationSize::beforeOrAfterPointer(), MFI.getObjectAlign(FI));
+        LocSize, MFI.getObjectAlign(FI));
 
     MFI.setStackID(FI, TargetStackID::ScalableVector);
     BuildMI(MBB, I, DL, get(Opcode), DstReg)
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index ab2046d39df6d..df7e0c8f022c8 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -406,6 +406,10 @@ void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
   Register Base = II->getOperand(1).getReg();
   bool IsBaseKill = II->getOperand(1).isKill();
   Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+
+  auto *OldMMO = *(II->memoperands_begin());
+  auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
+                                         LocationSize::beforeOrAfterPointer());
   for (unsigned I = 0; I < NF; ++I) {
     // Adding implicit-use of super register to describe we are using part of
     // super register, that prevents machine verifier complaining when part of
@@ -414,7 +418,7 @@ void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
     BuildMI(MBB, II, DL, TII->get(Opcode))
         .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
         .addReg(Base, getKillRegState(I == NF - 1))
-        .addMemOperand(*(II->memoperands_begin()))
+        .addMemOperand(NewMMO)
         .addReg(SrcReg, RegState::Implicit);
     if (I != NF - 1)
       BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
@@ -483,11 +487,14 @@ void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
   Register Base = II->getOperand(1).getReg();
   bool IsBaseKill = II->getOperand(1).isKill();
   Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+  auto *OldMMO = *(II->memoperands_begin());
+  auto *NewMMO = MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
+                                         LocationSize::beforeOrAfterPointer());
   for (unsigned I = 0; I < NF; ++I) {
     BuildMI(MBB, II, DL, TII->get(Opcode),
             TRI->getSubReg(DestReg, SubRegIdx + I))
         .addReg(Base, getKillRegState(I == NF - 1))
-        .addMemOperand(*(II->memoperands_begin()));
+        .addMemOperand(NewMMO);
     if (I != NF - 1)
       BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
           .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 26e86d41176e0..4c71e6c4a4627 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -32,7 +32,7 @@ define void @_Z3foov() {
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs1r.v v10, (a0) # vscale x 8-byte Folded Spill
 ; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_46)
 ; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_46)
 ; CHECK-NEXT:    vle16.v v10, (a0)
@@ -72,7 +72,7 @@ define void @_Z3foov() {
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl1r.v v14, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vl1r.v v14, (a0) # vscale x 8-byte Folded Reload
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
 ; CHECK-NEXT:    vsext.vf2 v8, v14, v0.t
 ; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_44)
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index 111b3e2bf82ce..97d102561129d 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -64,7 +64,7 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
 ; RV32-NEXT:    sub sp, sp, a0
 ; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
 ; RV32-NEXT:    addi a0, sp, 32
-; RV32-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    srli a0, a0, 3
 ; RV32-NEXT:    li a2, 8
@@ -79,7 +79,7 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
 ; RV32-NEXT:    vid.v v8
 ; RV32-NEXT:    li a2, -1
 ; RV32-NEXT:    addi a3, sp, 32
-; RV32-NEXT:    vl2r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT:    vl2r.v v24, (a3) # vscale x 16-byte Folded Reload
 ; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; RV32-NEXT:    vmsne.vi v0, v24, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll
index 9fc9a3c42867e..4ab48930ae78c 100644
--- a/llvm/test/CodeGen/RISCV/pr69586.ll
+++ b/llvm/test/CodeGen/RISCV/pr69586.ll
@@ -102,7 +102,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; NOREMAT-NEXT:    sf.vc.vv 3, 0, v8, v10
 ; NOREMAT-NEXT:    vle32.v v8, (a4)
 ; NOREMAT-NEXT:    addi a0, sp, 640
-; NOREMAT-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; NOREMAT-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
 ; NOREMAT-NEXT:    add a4, a7, t4
 ; NOREMAT-NEXT:    vle32.v v10, (a4)
 ; NOREMAT-NEXT:    sf.vc.vv 3, 0, v2, v0
@@ -377,7 +377,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; NOREMAT-NEXT:    vle32.v v14, (a2)
 ; NOREMAT-NEXT:    vle32.v v30, (a2)
 ; NOREMAT-NEXT:    addi a0, sp, 640
-; NOREMAT-NEXT:    vl2r.v v12, (a0) # Unknown-size Folded Reload
+; NOREMAT-NEXT:    vl2r.v v12, (a0) # vscale x 16-byte Folded Reload
 ; NOREMAT-NEXT:    sf.vc.vv 3, 0, v12, v22
 ; NOREMAT-NEXT:    addiw a2, s0, -512
 ; NOREMAT-NEXT:    sd a2, 336(sp) # 8-byte Folded Spill
@@ -998,7 +998,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a2, a2, 4
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v6, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v6, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    add a2, a0, t2
 ; REMAT-NEXT:    vle32.v v4, (a0)
 ; REMAT-NEXT:    vle32.v v2, (a2)
@@ -1008,7 +1008,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a2, a2, a5
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v6, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v6, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    add a2, a0, t3
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v4, v8
 ; REMAT-NEXT:    vle32.v v4, (a2)
@@ -1027,7 +1027,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a2, a2, a5
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v8, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v8, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    add a2, a0, t6
 ; REMAT-NEXT:    vle32.v v18, (a2)
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v20, v22
@@ -1046,7 +1046,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a5, a5, 4
 ; REMAT-NEXT:    add a5, sp, a5
 ; REMAT-NEXT:    addi a5, a5, 432
-; REMAT-NEXT:    vl2r.v v12, (a5) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v12, (a5) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v12, v2
 ; REMAT-NEXT:    vle32.v v2, (a2)
 ; REMAT-NEXT:    add a2, a0, s3
@@ -1056,7 +1056,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a5, a5, a6
 ; REMAT-NEXT:    add a5, sp, a5
 ; REMAT-NEXT:    addi a5, a5, 432
-; REMAT-NEXT:    vl2r.v v16, (a5) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v16, (a5) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v16, v4
 ; REMAT-NEXT:    vle32.v v30, (a2)
 ; REMAT-NEXT:    add a2, a0, s4
@@ -1074,7 +1074,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a5, a5, a6
 ; REMAT-NEXT:    add a5, sp, a5
 ; REMAT-NEXT:    addi a5, a5, 432
-; REMAT-NEXT:    vl2r.v v0, (a5) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v0, (a5) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v0, v18
 ; REMAT-NEXT:    vle32.v v0, (a2)
 ; REMAT-NEXT:    add a2, a0, s7
@@ -1097,7 +1097,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a2, a2, 3
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v12, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v12, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    add a2, a0, s11
 ; REMAT-NEXT:    vle32.v v12, (a2)
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v30, v16
@@ -1110,7 +1110,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a2, a2, 1
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v10, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v10, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    add a2, a0, a4
 ; REMAT-NEXT:    vle32.v v10, (a2)
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v4, v14
@@ -1119,7 +1119,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a2, a2, 2
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v14, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v14, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    add a2, a0, a3
 ; REMAT-NEXT:    vle32.v v14, (a2)
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v0, v18
@@ -1128,13 +1128,13 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a2, a2, 4
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v18, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v18, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    li a5, 7
 ; REMAT-NEXT:    slli a5, a5, 11
 ; REMAT-NEXT:    add a2, a0, a5
 ; REMAT-NEXT:    vle32.v v18, (a2)
 ; REMAT-NEXT:    addi a3, sp, 432
-; REMAT-NEXT:    vs2r.v v18, (a3) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v18, (a3) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v22, v20
 ; REMAT-NEXT:    vle32.v v18, (a2)
 ; REMAT-NEXT:    csrr a2, vlenb
@@ -1142,7 +1142,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a2, a2, a3
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v18, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v18, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    li a2, 29
 ; REMAT-NEXT:    slli a2, a2, 9
 ; REMAT-NEXT:    add a2, a0, a2
@@ -1154,7 +1154,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a2, a2, a3
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v20, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v20, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    li a2, 15
 ; REMAT-NEXT:    slli a2, a2, 10
 ; REMAT-NEXT:    add a2, a0, a2
@@ -1166,7 +1166,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a2, a2, a3
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v8, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v8, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    li a2, 31
 ; REMAT-NEXT:    slli a2, a2, 9
 ; REMAT-NEXT:    add a2, a0, a2
@@ -1175,14 +1175,14 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a3, a3, 3
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v8, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v8, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v8, v12
 ; REMAT-NEXT:    vle32.v v8, (a2)
 ; REMAT-NEXT:    csrr a2, vlenb
 ; REMAT-NEXT:    slli a2, a2, 3
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v8, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v8, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    lui a2, 4
 ; REMAT-NEXT:    add a2, a0, a2
 ; REMAT-NEXT:    vle32.v v4, (a2)
@@ -1193,7 +1193,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a2, a2, a3
 ; REMAT-NEXT:    add a2, sp, a2
 ; REMAT-NEXT:    addi a2, a2, 432
-; REMAT-NEXT:    vs2r.v v8, (a2) # Unknown-size Folded Spill
+; REMAT-NEXT:    vs2r.v v8, (a2) # vscale x 16-byte Folded Spill
 ; REMAT-NEXT:    lui a2, 4
 ; REMAT-NEXT:    addiw a2, a2, 512
 ; REMAT-NEXT:    add a2, a0, a2
@@ -1202,7 +1202,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a3, a3, 1
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v8, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v8, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v8, v10
 ; REMAT-NEXT:    vle32.v v20, (a2)
 ; REMAT-NEXT:    li a2, 17
@@ -1213,7 +1213,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a3, a3, 2
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v8, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v8, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v8, v14
 ; REMAT-NEXT:    vle32.v v22, (a2)
 ; REMAT-NEXT:    lui a2, 4
@@ -1224,9 +1224,9 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a3, a3, 4
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v8, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v8, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    addi a3, sp, 432
-; REMAT-NEXT:    vl2r.v v10, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v10, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v8, v10
 ; REMAT-NEXT:    vle32.v v8, (a2)
 ; REMAT-NEXT:    li a2, 9
@@ -1238,7 +1238,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a3, a3, a4
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v10, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v10, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v10, v18
 ; REMAT-NEXT:    vle32.v v10, (a2)
 ; REMAT-NEXT:    lui a2, 5
@@ -1250,7 +1250,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a3, a3, a4
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v12, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v12, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v12, v30
 ; REMAT-NEXT:    vle32.v v12, (a2)
 ; REMAT-NEXT:    li a2, 19
@@ -1262,7 +1262,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a3, a3, a4
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v14, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v14, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v14, v6
 ; REMAT-NEXT:    vle32.v v14, (a2)
 ; REMAT-NEXT:    lui a2, 5
@@ -1273,7 +1273,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    slli a3, a3, 3
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v16, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v16, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v16, v4
 ; REMAT-NEXT:    vle32.v v16, (a2)
 ; REMAT-NEXT:    lui a2, 5
@@ -1284,7 +1284,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
 ; REMAT-NEXT:    mul a3, a3, a4
 ; REMAT-NEXT:    add a3, sp, a3
 ; REMAT-NEXT:    addi a3, a3, 432
-; REMAT-NEXT:    vl2r.v v18, (a3) # Unknown-size Folded Reload
+; REMAT-NEXT:    vl2r.v v18, (a3) # vscale x 16-byte Folded Reload
 ; REMAT-NEXT:    sf.vc.vv 3, 0, v18, v2
 ; REMAT-NEXT:    vle32.v v18, (a2)
 ; REMAT-NEXT:    lui a2, 5
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index fb0c11e930b3b..7cb1fbf9a2344 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -41,7 +41,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
 ; CHECK-NEXT:    call func
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
@@ -53,7 +53,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    vl4r.v v20, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwsub.wv v8, v24, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, tu, mu
@@ -101,7 +101,7 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
 ; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v12, v0.t
 ; SUBREGLIVENESS-NEXT:    addi a0, sp, 16
-; SUBREGLIVENESS-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
 ; SUBREGLIVENESS-NEXT:    call func
 ; SUBREGLIVENESS-NEXT:    csrr a0, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a0, a0, 3
@@ -113,7 +113,7 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT:    add a0, a0, a1
 ; SUBREGLIVENESS-NEXT:    vl4r.v v20, (a0) # Unknown-size Folded Reload
 ; SUBREGLIVENESS-NEXT:    addi a0, sp, 16
-; SUBREGLIVENESS-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Fol...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/133171


More information about the llvm-commits mailing list