[llvm-branch-commits] [llvm] [AMDGPU] Make getNumSubRegsForSpillOp externally available (NFC). (PR #174997)
Christudasan Devadasan via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Jan 12 21:08:06 PST 2026
https://github.com/cdevadas updated https://github.com/llvm/llvm-project/pull/174997
From 2f15df22b2394cf8a0d09e18238209ce9de4a84d Mon Sep 17 00:00:00 2001
From: Christudasan Devadasan <Christudasan.Devadasan at amd.com>
Date: Wed, 7 Jan 2026 09:34:03 +0000
Subject: [PATCH 1/3] [AMDGPU] Make getNumSubRegsForSpillOp externally
available (NFC).
---
llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 6 +++---
llvm/lib/Target/AMDGPU/SIRegisterInfo.h | 2 ++
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index e5e9419b1c8c4..af89bde6f1a1d 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1124,8 +1124,8 @@ SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
return RC == &AMDGPU::SCC_CLASSRegClass ? &AMDGPU::SReg_32RegClass : RC;
}
-static unsigned getNumSubRegsForSpillOp(const MachineInstr &MI,
- const SIInstrInfo *TII) {
+unsigned SIRegisterInfo::getNumSubRegsForSpillOp(const MachineInstr &MI) const {
+ const SIInstrInfo *TII = ST.getInstrInfo();
unsigned Op = MI.getOpcode();
switch (Op) {
@@ -2462,7 +2462,7 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
*MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
*MI->memoperands_begin(), RS);
- MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(*MI, TII));
+ MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(*MI));
if (IsWWMRegSpill)
TII->restoreExec(*MF, *MBB, MI, DL, MFI->getSGPRForEXECCopy());
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index 2e2916f68f584..b959fe7cf2db1 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -164,6 +164,8 @@ class SIRegisterInfo final : public AMDGPUGenRegisterInfo {
const TargetRegisterClass *
getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
+ unsigned getNumSubRegsForSpillOp(const MachineInstr &MI) const;
+
const TargetRegisterClass *
getRegClassForBlockOp(const MachineFunction &MF) const {
return &AMDGPU::VReg_1024RegClass;
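With this first patch the helper is promoted from a file-local static to a
member of SIRegisterInfo, so code outside SIRegisterInfo.cpp can ask how many
32-bit subregisters a spill pseudo touches. A minimal sketch of such an
external caller, assuming the usual AMDGPU headers; countSpilledSubRegs and
its loop are illustrative, not part of the patch:

  #include "SIInstrInfo.h"
  #include "SIRegisterInfo.h"
  #include "llvm/CodeGen/MachineBasicBlock.h"

  using namespace llvm;

  // Illustrative only: sum the 32-bit subregisters moved by every spill or
  // restore pseudo in a block, e.g. for spill accounting, filtering with
  // SIInstrInfo's existing spill-flag predicates.
  static unsigned countSpilledSubRegs(const MachineBasicBlock &MBB,
                                      const SIRegisterInfo &TRI) {
    unsigned Count = 0;
    for (const MachineInstr &MI : MBB)
      if (SIInstrInfo::isVGPRSpill(MI) || SIInstrInfo::isSGPRSpill(MI))
        Count += TRI.getNumSubRegsForSpillOp(MI);
    return Count;
  }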
From 7cae9252823a45e86ea23244129d40b2d7ee1da9 Mon Sep 17 00:00:00 2001
From: Christudasan Devadasan <Christudasan.Devadasan at amd.com>
Date: Mon, 12 Jan 2026 12:50:39 +0000
Subject: [PATCH 2/3] Moved the implementation to SIInstrInfo.
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 146 +++++++++++++++++++++
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 3 +
llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 150 +---------------------
llvm/lib/Target/AMDGPU/SIRegisterInfo.h | 2 -
4 files changed, 150 insertions(+), 151 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 84b02262dcf73..7dee976ae3c50 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -6147,6 +6147,152 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
return RegClass < 0 ? nullptr : RI.getRegClass(RegClass);
}
+unsigned SIInstrInfo::getNumSubRegsForSpillOp(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case AMDGPU::SI_BLOCK_SPILL_V1024_SAVE:
+ case AMDGPU::SI_BLOCK_SPILL_V1024_RESTORE:
+ // FIXME: This assumes the mask is statically known and not computed at
+ // runtime. However, some ABIs may want to compute the mask dynamically and
+ // this will need to be updated.
+ return llvm::popcount(
+ (uint64_t)getNamedOperand(MI, AMDGPU::OpName::mask)->getImm());
+ case AMDGPU::SI_SPILL_S1024_SAVE:
+ case AMDGPU::SI_SPILL_S1024_RESTORE:
+ case AMDGPU::SI_SPILL_V1024_SAVE:
+ case AMDGPU::SI_SPILL_V1024_RESTORE:
+ case AMDGPU::SI_SPILL_A1024_SAVE:
+ case AMDGPU::SI_SPILL_A1024_RESTORE:
+ case AMDGPU::SI_SPILL_AV1024_SAVE:
+ case AMDGPU::SI_SPILL_AV1024_RESTORE:
+ return 32;
+ case AMDGPU::SI_SPILL_S512_SAVE:
+ case AMDGPU::SI_SPILL_S512_RESTORE:
+ case AMDGPU::SI_SPILL_V512_SAVE:
+ case AMDGPU::SI_SPILL_V512_RESTORE:
+ case AMDGPU::SI_SPILL_A512_SAVE:
+ case AMDGPU::SI_SPILL_A512_RESTORE:
+ case AMDGPU::SI_SPILL_AV512_SAVE:
+ case AMDGPU::SI_SPILL_AV512_RESTORE:
+ return 16;
+ case AMDGPU::SI_SPILL_S384_SAVE:
+ case AMDGPU::SI_SPILL_S384_RESTORE:
+ case AMDGPU::SI_SPILL_V384_SAVE:
+ case AMDGPU::SI_SPILL_V384_RESTORE:
+ case AMDGPU::SI_SPILL_A384_SAVE:
+ case AMDGPU::SI_SPILL_A384_RESTORE:
+ case AMDGPU::SI_SPILL_AV384_SAVE:
+ case AMDGPU::SI_SPILL_AV384_RESTORE:
+ return 12;
+ case AMDGPU::SI_SPILL_S352_SAVE:
+ case AMDGPU::SI_SPILL_S352_RESTORE:
+ case AMDGPU::SI_SPILL_V352_SAVE:
+ case AMDGPU::SI_SPILL_V352_RESTORE:
+ case AMDGPU::SI_SPILL_A352_SAVE:
+ case AMDGPU::SI_SPILL_A352_RESTORE:
+ case AMDGPU::SI_SPILL_AV352_SAVE:
+ case AMDGPU::SI_SPILL_AV352_RESTORE:
+ return 11;
+ case AMDGPU::SI_SPILL_S320_SAVE:
+ case AMDGPU::SI_SPILL_S320_RESTORE:
+ case AMDGPU::SI_SPILL_V320_SAVE:
+ case AMDGPU::SI_SPILL_V320_RESTORE:
+ case AMDGPU::SI_SPILL_A320_SAVE:
+ case AMDGPU::SI_SPILL_A320_RESTORE:
+ case AMDGPU::SI_SPILL_AV320_SAVE:
+ case AMDGPU::SI_SPILL_AV320_RESTORE:
+ return 10;
+ case AMDGPU::SI_SPILL_S288_SAVE:
+ case AMDGPU::SI_SPILL_S288_RESTORE:
+ case AMDGPU::SI_SPILL_V288_SAVE:
+ case AMDGPU::SI_SPILL_V288_RESTORE:
+ case AMDGPU::SI_SPILL_A288_SAVE:
+ case AMDGPU::SI_SPILL_A288_RESTORE:
+ case AMDGPU::SI_SPILL_AV288_SAVE:
+ case AMDGPU::SI_SPILL_AV288_RESTORE:
+ return 9;
+ case AMDGPU::SI_SPILL_S256_SAVE:
+ case AMDGPU::SI_SPILL_S256_RESTORE:
+ case AMDGPU::SI_SPILL_V256_SAVE:
+ case AMDGPU::SI_SPILL_V256_RESTORE:
+ case AMDGPU::SI_SPILL_A256_SAVE:
+ case AMDGPU::SI_SPILL_A256_RESTORE:
+ case AMDGPU::SI_SPILL_AV256_SAVE:
+ case AMDGPU::SI_SPILL_AV256_RESTORE:
+ return 8;
+ case AMDGPU::SI_SPILL_S224_SAVE:
+ case AMDGPU::SI_SPILL_S224_RESTORE:
+ case AMDGPU::SI_SPILL_V224_SAVE:
+ case AMDGPU::SI_SPILL_V224_RESTORE:
+ case AMDGPU::SI_SPILL_A224_SAVE:
+ case AMDGPU::SI_SPILL_A224_RESTORE:
+ case AMDGPU::SI_SPILL_AV224_SAVE:
+ case AMDGPU::SI_SPILL_AV224_RESTORE:
+ return 7;
+ case AMDGPU::SI_SPILL_S192_SAVE:
+ case AMDGPU::SI_SPILL_S192_RESTORE:
+ case AMDGPU::SI_SPILL_V192_SAVE:
+ case AMDGPU::SI_SPILL_V192_RESTORE:
+ case AMDGPU::SI_SPILL_A192_SAVE:
+ case AMDGPU::SI_SPILL_A192_RESTORE:
+ case AMDGPU::SI_SPILL_AV192_SAVE:
+ case AMDGPU::SI_SPILL_AV192_RESTORE:
+ return 6;
+ case AMDGPU::SI_SPILL_S160_SAVE:
+ case AMDGPU::SI_SPILL_S160_RESTORE:
+ case AMDGPU::SI_SPILL_V160_SAVE:
+ case AMDGPU::SI_SPILL_V160_RESTORE:
+ case AMDGPU::SI_SPILL_A160_SAVE:
+ case AMDGPU::SI_SPILL_A160_RESTORE:
+ case AMDGPU::SI_SPILL_AV160_SAVE:
+ case AMDGPU::SI_SPILL_AV160_RESTORE:
+ return 5;
+ case AMDGPU::SI_SPILL_S128_SAVE:
+ case AMDGPU::SI_SPILL_S128_RESTORE:
+ case AMDGPU::SI_SPILL_V128_SAVE:
+ case AMDGPU::SI_SPILL_V128_RESTORE:
+ case AMDGPU::SI_SPILL_A128_SAVE:
+ case AMDGPU::SI_SPILL_A128_RESTORE:
+ case AMDGPU::SI_SPILL_AV128_SAVE:
+ case AMDGPU::SI_SPILL_AV128_RESTORE:
+ return 4;
+ case AMDGPU::SI_SPILL_S96_SAVE:
+ case AMDGPU::SI_SPILL_S96_RESTORE:
+ case AMDGPU::SI_SPILL_V96_SAVE:
+ case AMDGPU::SI_SPILL_V96_RESTORE:
+ case AMDGPU::SI_SPILL_A96_SAVE:
+ case AMDGPU::SI_SPILL_A96_RESTORE:
+ case AMDGPU::SI_SPILL_AV96_SAVE:
+ case AMDGPU::SI_SPILL_AV96_RESTORE:
+ return 3;
+ case AMDGPU::SI_SPILL_S64_SAVE:
+ case AMDGPU::SI_SPILL_S64_RESTORE:
+ case AMDGPU::SI_SPILL_V64_SAVE:
+ case AMDGPU::SI_SPILL_V64_RESTORE:
+ case AMDGPU::SI_SPILL_A64_SAVE:
+ case AMDGPU::SI_SPILL_A64_RESTORE:
+ case AMDGPU::SI_SPILL_AV64_SAVE:
+ case AMDGPU::SI_SPILL_AV64_RESTORE:
+ return 2;
+ case AMDGPU::SI_SPILL_S32_SAVE:
+ case AMDGPU::SI_SPILL_S32_RESTORE:
+ case AMDGPU::SI_SPILL_V32_SAVE:
+ case AMDGPU::SI_SPILL_V32_RESTORE:
+ case AMDGPU::SI_SPILL_A32_SAVE:
+ case AMDGPU::SI_SPILL_A32_RESTORE:
+ case AMDGPU::SI_SPILL_AV32_SAVE:
+ case AMDGPU::SI_SPILL_AV32_RESTORE:
+ case AMDGPU::SI_SPILL_WWM_V32_SAVE:
+ case AMDGPU::SI_SPILL_WWM_V32_RESTORE:
+ case AMDGPU::SI_SPILL_WWM_AV32_SAVE:
+ case AMDGPU::SI_SPILL_WWM_AV32_RESTORE:
+ case AMDGPU::SI_SPILL_V16_SAVE:
+ case AMDGPU::SI_SPILL_V16_RESTORE:
+ return 1;
+ default:
+ llvm_unreachable("Invalid spill opcode");
+ }
+}
+
void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
MachineBasicBlock::iterator I = MI;
MachineBasicBlock *MBB = MI.getParent();
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index bd4029ca97014..1e46fe0e2d511 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1392,6 +1392,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8;
}
+ /// Return the number of registers spilled/reloaded by the spill opcode.
+ unsigned getNumSubRegsForSpillOp(const MachineInstr &MI) const;
+
/// Legalize the \p OpIndex operand of this instruction by inserting
/// a MOV. For example:
/// ADD_I32_e32 VGPR0, 15
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index af89bde6f1a1d..3efdad6288814 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1124,154 +1124,6 @@ SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
return RC == &AMDGPU::SCC_CLASSRegClass ? &AMDGPU::SReg_32RegClass : RC;
}
-unsigned SIRegisterInfo::getNumSubRegsForSpillOp(const MachineInstr &MI) const {
- const SIInstrInfo *TII = ST.getInstrInfo();
-
- unsigned Op = MI.getOpcode();
- switch (Op) {
- case AMDGPU::SI_BLOCK_SPILL_V1024_SAVE:
- case AMDGPU::SI_BLOCK_SPILL_V1024_RESTORE:
- // FIXME: This assumes the mask is statically known and not computed at
- // runtime. However, some ABIs may want to compute the mask dynamically and
- // this will need to be updated.
- return llvm::popcount(
- (uint64_t)TII->getNamedOperand(MI, AMDGPU::OpName::mask)->getImm());
- case AMDGPU::SI_SPILL_S1024_SAVE:
- case AMDGPU::SI_SPILL_S1024_RESTORE:
- case AMDGPU::SI_SPILL_V1024_SAVE:
- case AMDGPU::SI_SPILL_V1024_RESTORE:
- case AMDGPU::SI_SPILL_A1024_SAVE:
- case AMDGPU::SI_SPILL_A1024_RESTORE:
- case AMDGPU::SI_SPILL_AV1024_SAVE:
- case AMDGPU::SI_SPILL_AV1024_RESTORE:
- return 32;
- case AMDGPU::SI_SPILL_S512_SAVE:
- case AMDGPU::SI_SPILL_S512_RESTORE:
- case AMDGPU::SI_SPILL_V512_SAVE:
- case AMDGPU::SI_SPILL_V512_RESTORE:
- case AMDGPU::SI_SPILL_A512_SAVE:
- case AMDGPU::SI_SPILL_A512_RESTORE:
- case AMDGPU::SI_SPILL_AV512_SAVE:
- case AMDGPU::SI_SPILL_AV512_RESTORE:
- return 16;
- case AMDGPU::SI_SPILL_S384_SAVE:
- case AMDGPU::SI_SPILL_S384_RESTORE:
- case AMDGPU::SI_SPILL_V384_SAVE:
- case AMDGPU::SI_SPILL_V384_RESTORE:
- case AMDGPU::SI_SPILL_A384_SAVE:
- case AMDGPU::SI_SPILL_A384_RESTORE:
- case AMDGPU::SI_SPILL_AV384_SAVE:
- case AMDGPU::SI_SPILL_AV384_RESTORE:
- return 12;
- case AMDGPU::SI_SPILL_S352_SAVE:
- case AMDGPU::SI_SPILL_S352_RESTORE:
- case AMDGPU::SI_SPILL_V352_SAVE:
- case AMDGPU::SI_SPILL_V352_RESTORE:
- case AMDGPU::SI_SPILL_A352_SAVE:
- case AMDGPU::SI_SPILL_A352_RESTORE:
- case AMDGPU::SI_SPILL_AV352_SAVE:
- case AMDGPU::SI_SPILL_AV352_RESTORE:
- return 11;
- case AMDGPU::SI_SPILL_S320_SAVE:
- case AMDGPU::SI_SPILL_S320_RESTORE:
- case AMDGPU::SI_SPILL_V320_SAVE:
- case AMDGPU::SI_SPILL_V320_RESTORE:
- case AMDGPU::SI_SPILL_A320_SAVE:
- case AMDGPU::SI_SPILL_A320_RESTORE:
- case AMDGPU::SI_SPILL_AV320_SAVE:
- case AMDGPU::SI_SPILL_AV320_RESTORE:
- return 10;
- case AMDGPU::SI_SPILL_S288_SAVE:
- case AMDGPU::SI_SPILL_S288_RESTORE:
- case AMDGPU::SI_SPILL_V288_SAVE:
- case AMDGPU::SI_SPILL_V288_RESTORE:
- case AMDGPU::SI_SPILL_A288_SAVE:
- case AMDGPU::SI_SPILL_A288_RESTORE:
- case AMDGPU::SI_SPILL_AV288_SAVE:
- case AMDGPU::SI_SPILL_AV288_RESTORE:
- return 9;
- case AMDGPU::SI_SPILL_S256_SAVE:
- case AMDGPU::SI_SPILL_S256_RESTORE:
- case AMDGPU::SI_SPILL_V256_SAVE:
- case AMDGPU::SI_SPILL_V256_RESTORE:
- case AMDGPU::SI_SPILL_A256_SAVE:
- case AMDGPU::SI_SPILL_A256_RESTORE:
- case AMDGPU::SI_SPILL_AV256_SAVE:
- case AMDGPU::SI_SPILL_AV256_RESTORE:
- return 8;
- case AMDGPU::SI_SPILL_S224_SAVE:
- case AMDGPU::SI_SPILL_S224_RESTORE:
- case AMDGPU::SI_SPILL_V224_SAVE:
- case AMDGPU::SI_SPILL_V224_RESTORE:
- case AMDGPU::SI_SPILL_A224_SAVE:
- case AMDGPU::SI_SPILL_A224_RESTORE:
- case AMDGPU::SI_SPILL_AV224_SAVE:
- case AMDGPU::SI_SPILL_AV224_RESTORE:
- return 7;
- case AMDGPU::SI_SPILL_S192_SAVE:
- case AMDGPU::SI_SPILL_S192_RESTORE:
- case AMDGPU::SI_SPILL_V192_SAVE:
- case AMDGPU::SI_SPILL_V192_RESTORE:
- case AMDGPU::SI_SPILL_A192_SAVE:
- case AMDGPU::SI_SPILL_A192_RESTORE:
- case AMDGPU::SI_SPILL_AV192_SAVE:
- case AMDGPU::SI_SPILL_AV192_RESTORE:
- return 6;
- case AMDGPU::SI_SPILL_S160_SAVE:
- case AMDGPU::SI_SPILL_S160_RESTORE:
- case AMDGPU::SI_SPILL_V160_SAVE:
- case AMDGPU::SI_SPILL_V160_RESTORE:
- case AMDGPU::SI_SPILL_A160_SAVE:
- case AMDGPU::SI_SPILL_A160_RESTORE:
- case AMDGPU::SI_SPILL_AV160_SAVE:
- case AMDGPU::SI_SPILL_AV160_RESTORE:
- return 5;
- case AMDGPU::SI_SPILL_S128_SAVE:
- case AMDGPU::SI_SPILL_S128_RESTORE:
- case AMDGPU::SI_SPILL_V128_SAVE:
- case AMDGPU::SI_SPILL_V128_RESTORE:
- case AMDGPU::SI_SPILL_A128_SAVE:
- case AMDGPU::SI_SPILL_A128_RESTORE:
- case AMDGPU::SI_SPILL_AV128_SAVE:
- case AMDGPU::SI_SPILL_AV128_RESTORE:
- return 4;
- case AMDGPU::SI_SPILL_S96_SAVE:
- case AMDGPU::SI_SPILL_S96_RESTORE:
- case AMDGPU::SI_SPILL_V96_SAVE:
- case AMDGPU::SI_SPILL_V96_RESTORE:
- case AMDGPU::SI_SPILL_A96_SAVE:
- case AMDGPU::SI_SPILL_A96_RESTORE:
- case AMDGPU::SI_SPILL_AV96_SAVE:
- case AMDGPU::SI_SPILL_AV96_RESTORE:
- return 3;
- case AMDGPU::SI_SPILL_S64_SAVE:
- case AMDGPU::SI_SPILL_S64_RESTORE:
- case AMDGPU::SI_SPILL_V64_SAVE:
- case AMDGPU::SI_SPILL_V64_RESTORE:
- case AMDGPU::SI_SPILL_A64_SAVE:
- case AMDGPU::SI_SPILL_A64_RESTORE:
- case AMDGPU::SI_SPILL_AV64_SAVE:
- case AMDGPU::SI_SPILL_AV64_RESTORE:
- return 2;
- case AMDGPU::SI_SPILL_S32_SAVE:
- case AMDGPU::SI_SPILL_S32_RESTORE:
- case AMDGPU::SI_SPILL_V32_SAVE:
- case AMDGPU::SI_SPILL_V32_RESTORE:
- case AMDGPU::SI_SPILL_A32_SAVE:
- case AMDGPU::SI_SPILL_A32_RESTORE:
- case AMDGPU::SI_SPILL_AV32_SAVE:
- case AMDGPU::SI_SPILL_AV32_RESTORE:
- case AMDGPU::SI_SPILL_WWM_V32_SAVE:
- case AMDGPU::SI_SPILL_WWM_V32_RESTORE:
- case AMDGPU::SI_SPILL_WWM_AV32_SAVE:
- case AMDGPU::SI_SPILL_WWM_AV32_RESTORE:
- case AMDGPU::SI_SPILL_V16_SAVE:
- case AMDGPU::SI_SPILL_V16_RESTORE:
- return 1;
- default: llvm_unreachable("Invalid spill opcode");
- }
-}
-
static int getOffsetMUBUFStore(unsigned Opc) {
switch (Opc) {
case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
@@ -2462,7 +2314,7 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
*MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
*MI->memoperands_begin(), RS);
- MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(*MI));
+ MFI->addToSpilledVGPRs(TII->getNumSubRegsForSpillOp(*MI));
if (IsWWMRegSpill)
TII->restoreExec(*MF, *MBB, MI, DL, MFI->getSGPRForEXECCopy());
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index b959fe7cf2db1..2e2916f68f584 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -164,8 +164,6 @@ class SIRegisterInfo final : public AMDGPUGenRegisterInfo {
const TargetRegisterClass *
getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
- unsigned getNumSubRegsForSpillOp(const MachineInstr &MI) const;
-
const TargetRegisterClass *
getRegClassForBlockOp(const MachineFunction &MF) const {
return &AMDGPU::VReg_1024RegClass;
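The moved switch encodes a simple invariant: for the ordinary
SI_SPILL_{S,V,A,AV}<N>_{SAVE,RESTORE} pseudos the result is the spilled width
in bits divided by 32 (1024 -> 32 down to 32 -> 1; the sub-dword V16 case also
maps to 1), while the SI_BLOCK_SPILL pseudos transfer only the lanes selected
by their mask operand, hence the popcount. A standalone sketch of the two
rules, with illustrative values:

  #include <bit>
  #include <cstdint>

  int main() {
    // Ordinary spill pseudo: subreg count is the register width / 32.
    unsigned WidthInBits = 1024;            // e.g. SI_SPILL_V1024_SAVE
    unsigned NumSubRegs = WidthInBits / 32; // 32

    // Block spill pseudo: only lanes whose mask bit is set are moved.
    uint64_t Mask = 0x0000FFFF;                  // low 16 of 32 lanes
    unsigned BlockSubRegs = std::popcount(Mask); // 16

    return (NumSubRegs == 32 && BlockSubRegs == 16) ? 0 : 1;
  }

Keeping the explicit switch rather than deriving width / 32 from the opcode
preserves the llvm_unreachable diagnostic when a non-spill opcode reaches the
function.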
From 144d8a2877a431949d350fec6ed24139d2859097 Mon Sep 17 00:00:00 2001
From: Christudasan Devadasan <Christudasan.Devadasan at amd.com>
Date: Tue, 13 Jan 2026 03:48:15 +0000
Subject: [PATCH 3/3] Fixed a comment.
---
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 1e46fe0e2d511..38b40ec586a33 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1392,7 +1392,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8;
}
- /// Return the number of registers spilled/reloaded by the spill opcode.
+ /// Return the number of registers spilled/reloaded by the spill instruction.
unsigned getNumSubRegsForSpillOp(const MachineInstr &MI) const;
/// Legalize the \p OpIndex operand of this instruction by inserting
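After the move, callers reach the helper through the subtarget's instruction
info rather than the register info, matching the updated call site in
SIRegisterInfo::eliminateFrameIndex. A minimal sketch of the end-state call
pattern; the spillSubRegCount wrapper is illustrative:

  #include "GCNSubtarget.h"
  #include "SIInstrInfo.h"
  #include "llvm/CodeGen/MachineFunction.h"

  using namespace llvm;

  // Illustrative: query the subreg count for a spill pseudo via the
  // subtarget, as any SIInstrInfo client would after this change.
  static unsigned spillSubRegCount(const MachineInstr &MI) {
    const GCNSubtarget &ST = MI.getMF()->getSubtarget<GCNSubtarget>();
    const SIInstrInfo *TII = ST.getInstrInfo();
    return TII->getNumSubRegsForSpillOp(MI);
  }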