[llvm] [AMDGPU] Add scheduling stage to rewrite MFMA from VGPR to AGPR (PR #149367)

Jeffrey Byrnes via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 17 10:50:50 PDT 2025


https://github.com/jrbyrnes created https://github.com/llvm/llvm-project/pull/149367

After https://github.com/llvm/llvm-project/pull/145025 we will always produce the VGPR MFMA form. While this is beneficial in some cases, there are still cases where the AGPR form is preferred: specifically, cases with high per-iteration RP coming from MFMAs combined with in-loop VGPR users of the MFMA results. In such cases, selecting the VGPR form may cause an explosion in VGPR pressure, which degrades the quality of scheduling. The PostRA MFMA rewriter can improve RA for some of these cases, but it does not help the scheduler.

This PR performs the rewriting during scheduling, as a separate scheduling stage. It only attempts the VGPR -> AGPR rewrite if a region has ArchVGPR pressure over the addressable limit, and if we find that we will not need to issue any cross-RC copies inside loop bodies. We could also implement the AGPR -> VGPR direction, but the assumption is that we will always start from the VGPR form.
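
For reference, the gating decision in the new stage roughly has the following shape (a simplified sketch of RewriteScheduleStage::initGCNSchedStage from the patch below, not the exact code; wouldNeedCrossRCCopyInLoop is a hypothetical stand-in for the MachineCycleInfo-based check in the patch):

  // Only consider rewriting if some region exceeds the addressable ArchVGPR limit.
  if (!ST.hasGFX90AInsts() || DAG.RegionsWithExcessArchVGPR.none())
    return false;

  // After tentatively rewriting the eligible MFMAs to the AGPR form, re-measure
  // pressure: the rewrite is only worthwhile if it brings at least one
  // high-pressure region back under the addressable ArchVGPR limit without
  // exceeding the combined VGPR budget.
  bool ShouldRewrite = false;
  for (unsigned RegionIdx = 0; RegionIdx < DAG.Regions.size(); ++RegionIdx) {
    if (!DAG.RegionsWithExcessArchVGPR[RegionIdx])
      continue;
    GCNRegPressure RP = DAG.getRealRegPressure(RegionIdx);
    if (RP.getArchVGPRNum() <= ST.getAddressableNumArchVGPRs() &&
        RP.getVGPRNum(true) <= ST.getMaxNumVGPRs(MF)) {
      ShouldRewrite = true;
      break;
    }
  }

  // Bail out (and undo the tentative rewrite) if any rewritten MFMA would force
  // a cross-RC copy inside a loop body.
  if (ShouldRewrite && wouldNeedCrossRCCopyInLoop())
    ShouldRewrite = false;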

This is still a WIP:
	Needs more testing
	Still a bit undecided about the heuristic
	Considering making the implementation more general for other types of rewriting / transformations, though this may be left as a TODO



>From 2e15bfc2e2edbc2d1a1f26aca13c550cc8528a85 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 15 Jul 2025 15:10:41 -0700
Subject: [PATCH] [AMDGPU] Add scheduling stage to rewrite MFMA from VGPR to
 AGPR

Change-Id: I47b2a4274a35f3cf0a6d064674d1d29526e4dfd2
---
 llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 286 +++++++++++++++++++-
 llvm/lib/Target/AMDGPU/GCNSchedStrategy.h   |  35 ++-
 2 files changed, 315 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index fce8f36d45969..0a34243c269cb 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -29,6 +29,7 @@
 #include "SIMachineFunctionInfo.h"
 #include "Utils/AMDGPUBaseInfo.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineCycleAnalysis.h"
 #include "llvm/CodeGen/RegisterClassInfo.h"
 #include "llvm/MC/LaneBitmask.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -528,6 +529,7 @@ GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
     const MachineSchedContext *C, bool IsLegacyScheduler)
     : GCNSchedStrategy(C) {
   SchedStages.push_back(GCNSchedStageID::OccInitialSchedule);
+  SchedStages.push_back(GCNSchedStageID::RewriteSchedule);
   SchedStages.push_back(GCNSchedStageID::UnclusteredHighRPReschedule);
   SchedStages.push_back(GCNSchedStageID::ClusteredLowOccupancyReschedule);
   SchedStages.push_back(GCNSchedStageID::PreRARematerialize);
@@ -778,6 +780,8 @@ GCNScheduleDAGMILive::createSchedStage(GCNSchedStageID SchedStageID) {
   switch (SchedStageID) {
   case GCNSchedStageID::OccInitialSchedule:
     return std::make_unique<OccInitialScheduleStage>(SchedStageID, *this);
+  case GCNSchedStageID::RewriteSchedule:
+    return std::make_unique<RewriteScheduleStage>(SchedStageID, *this);
   case GCNSchedStageID::UnclusteredHighRPReschedule:
     return std::make_unique<UnclusteredHighRPStage>(SchedStageID, *this);
   case GCNSchedStageID::ClusteredLowOccupancyReschedule:
@@ -803,7 +807,8 @@ void GCNScheduleDAGMILive::schedule() {
 GCNRegPressure
 GCNScheduleDAGMILive::getRealRegPressure(unsigned RegionIdx) const {
   GCNDownwardRPTracker RPTracker(*LIS);
-  RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
+  RPTracker.advance(Regions[RegionIdx].first, Regions[RegionIdx].second,
+                    &LiveIns[RegionIdx]);
   return RPTracker.moveMaxPressure();
 }
 
@@ -940,10 +945,12 @@ void GCNScheduleDAGMILive::finalizeSchedule() {
   Pressure.resize(Regions.size());
   RegionsWithHighRP.resize(Regions.size());
   RegionsWithExcessRP.resize(Regions.size());
+  RegionsWithExcessArchVGPR.resize(Regions.size());
   RegionsWithMinOcc.resize(Regions.size());
   RegionsWithIGLPInstrs.resize(Regions.size());
   RegionsWithHighRP.reset();
   RegionsWithExcessRP.reset();
+  RegionsWithExcessArchVGPR.reset();
   RegionsWithMinOcc.reset();
   RegionsWithIGLPInstrs.reset();
 
@@ -1002,6 +1009,9 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const GCNSchedStageID &StageID) {
   case GCNSchedStageID::OccInitialSchedule:
     OS << "Max Occupancy Initial Schedule";
     break;
+  case GCNSchedStageID::RewriteSchedule:
+    OS << "Instruction Rewriting Reschedule";
+    break;
   case GCNSchedStageID::UnclusteredHighRPReschedule:
     OS << "Unclustered High Register Pressure Reschedule";
     break;
@@ -1035,6 +1045,245 @@ bool GCNSchedStage::initGCNSchedStage() {
   return true;
 }
 
+bool RewriteScheduleStage::initGCNSchedStage() {
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  if (!ST.hasGFX90AInsts() || DAG.RegionsWithExcessArchVGPR.none())
+    return false;
+
+  const SIInstrInfo *TII = ST.getInstrInfo();
+  const SIRegisterInfo *SRI = ST.getRegisterInfo();
+  SmallPtrSet<MachineInstr *, 16> CrossRCUseCopies;
+  SmallPtrSet<MachineInstr *, 16> CrossRCDefCopies;
+  std::vector<std::pair<MachineInstr *, unsigned>> RewriteInsts;
+
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      if (TII->isMAI(MI)) {
+        int ReplacementOp = AMDGPU::getMFMASrcCVDstAGPROp(MI.getOpcode());
+        if (ReplacementOp == -1)
+          continue;
+        const TargetRegisterClass *VGPRRC =
+            DAG.MRI.getRegClass(MI.getOperand(0).getReg());
+        const TargetRegisterClass *AGPRRC = SRI->getEquivalentAGPRClass(VGPRRC);
+        const TargetRegisterClass *DestConstrainExceptRC =
+            recomputeRegClassExceptRewritable(MI.getOperand(0).getReg(), VGPRRC,
+                                              AGPRRC);
+
+        if (!DestConstrainExceptRC)
+          CrossRCUseCopies.insert(&MI);
+
+        MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
+        if (Src2 && Src2->isReg()) {
+          const TargetRegisterClass *Src2ConstrainExceptRC =
+              recomputeRegClassExceptRewritable(Src2->getReg(), VGPRRC, AGPRRC);
+          if (!Src2ConstrainExceptRC || Src2ConstrainExceptRC != AGPRRC)
+            CrossRCDefCopies.insert(&MI);
+
+          DAG.MRI.setRegClass(Src2->getReg(), AGPRRC);
+        }
+
+        DAG.MRI.setRegClass(MI.getOperand(0).getReg(), AGPRRC);
+
+        auto OriginalOpc = MI.getOpcode();
+        MI.setDesc(TII->get(ReplacementOp));
+        RewriteInsts.push_back({&MI, OriginalOpc});
+      }
+    }
+  }
+
+  bool ShouldRewrite = false;
+  for (unsigned RegionIdx = 0; RegionIdx < DAG.Regions.size(); RegionIdx++) {
+    if (!DAG.RegionsWithExcessArchVGPR[RegionIdx])
+      continue;
+
+    // For the cases we care about (i.e. ArchVGPR usage greater than the
+    // addressable limit), rewriting alone should bring pressure down to a
+    // manageable level. If we find any such region, then the rewrite is
+    // potentially beneficial.
+    auto PressureAfter = DAG.getRealRegPressure(RegionIdx);
+    unsigned MaxCombinedVGPRs = ST.getMaxNumVGPRs(MF);
+    if (PressureAfter.getArchVGPRNum() <= ST.getAddressableNumArchVGPRs() &&
+        PressureAfter.getVGPRNum(true) <= MaxCombinedVGPRs) {
+      ShouldRewrite = true;
+      break;
+    }
+  }
+
+  // If we find that we would need to insert cross-RC copies inside loop bodies,
+  // then bail.
+  if (ShouldRewrite) {
+    CI.clear();
+    CI.compute(MF);
+
+    for (auto *DefMI : CrossRCUseCopies) {
+      auto DefReg = DefMI->getOperand(0).getReg();
+
+      for (auto &UseMI : DAG.MRI.use_nodbg_instructions(DefReg)) {
+        for (unsigned OpNo = 0; OpNo < UseMI.getNumOperands(); OpNo++) {
+          auto &TheOp = UseMI.getOperand(OpNo);
+          if (!TheOp.isReg() || !TheOp.isUse())
+            continue;
+          if (TheOp.getReg() != DefReg)
+            continue;
+
+          auto RequiredRC = UseMI.getRegClassConstraint(OpNo, DAG.TII, DAG.TRI);
+          if (!RequiredRC || SRI->hasAGPRs(RequiredRC))
+            continue;
+
+          unsigned DefDepth = CI.getCycleDepth(DefMI->getParent());
+          if (DefDepth && CI.getCycleDepth(UseMI.getParent()) >= DefDepth) {
+            ShouldRewrite = false;
+            break;
+          }
+        }
+        if (!ShouldRewrite)
+          break;
+      }
+      if (!ShouldRewrite)
+        break;
+    }
+  }
+
+  // If we haven't found the beneficial conditions, prefer the VGPR form, which
+  // may result in fewer cross-RC copies.
+  if (!ShouldRewrite) {
+    for (auto RI : RewriteInsts) {
+      MachineInstr *MI = RI.first;
+
+      assert(TII->isMAI(*MI));
+      const TargetRegisterClass *AGPRRC =
+          DAG.MRI.getRegClass(MI->getOperand(0).getReg());
+      const TargetRegisterClass *VGPRRC = SRI->getEquivalentVGPRClass(AGPRRC);
+
+      MachineOperand *Src2 = TII->getNamedOperand(*MI, AMDGPU::OpName::src2);
+      assert(Src2);
+
+      if (Src2->isReg()) {
+        DAG.MRI.setRegClass(Src2->getReg(), VGPRRC);
+      }
+      DAG.MRI.setRegClass(MI->getOperand(0).getReg(), VGPRRC);
+      MI->setDesc(TII->get(RI.second));
+    }
+
+    return false;
+  }
+
+  DAG.RegionsWithExcessArchVGPR.reset();
+  DAG.RegionsWithExcessRP.reset();
+
+  // Insert cross-RC copies for the users of the MFMA result.
+  for (auto MI : CrossRCUseCopies) {
+    auto DefReg = MI->getOperand(0).getReg();
+    SmallVector<MachineInstr *, 4> UseInstrs;
+    for (auto &UseMI : DAG.MRI.use_nodbg_instructions(DefReg))
+      UseInstrs.push_back(&UseMI);
+
+    DenseMap<Register, MachineInstr *> NewCopies;
+    for (auto UseMI : UseInstrs) {
+      for (unsigned OpNo = 0; OpNo < UseMI->getNumOperands(); OpNo++) {
+        auto &TheOp = UseMI->getOperand(OpNo);
+        if (!TheOp.isReg() || !TheOp.isUse())
+          continue;
+        if (TheOp.getReg() != DefReg)
+          continue;
+
+        auto RequiredRC = UseMI->getRegClassConstraint(OpNo, DAG.TII, DAG.TRI);
+
+        if (!RequiredRC || SRI->hasAGPRs(RequiredRC))
+          continue;
+
+        Register DestVGPR;
+        if (!NewCopies.contains(DefReg)) {
+          Register DestVGPR = DAG.MRI.createVirtualRegister(
+              SRI->getEquivalentVGPRClass(DAG.MRI.getRegClass(DefReg)));
+
+          // Insert copy near the user to avoid inserting inside loops.
+          MachineInstrBuilder VGPRCopy =
+              BuildMI(*UseMI->getParent(), UseMI->getIterator(),
+                      UseMI->getDebugLoc(), TII->get(TargetOpcode::COPY))
+                  .addDef(DestVGPR, 0, 0)
+                  .addUse(DefReg, 0, 0);
+
+          NewCopies[DefReg] = VGPRCopy;
+        }
+        DestVGPR = NewCopies[DefReg]->getOperand(0).getReg();
+        TheOp.setReg(DestVGPR);
+      }
+    }
+    if (NewCopies.contains(DefReg)) {
+      DAG.LIS->InsertMachineInstrInMaps(*NewCopies[DefReg]);
+      DAG.LIS->removeInterval(DefReg);
+      DAG.LIS->createAndComputeVirtRegInterval(DefReg);
+      DAG.LIS->createAndComputeVirtRegInterval(
+          NewCopies[DefReg]->getOperand(0).getReg());
+    }
+  }
+
+  // Insert cross-RC copies for the use operands of the MFMA.
+  for (auto MI : CrossRCDefCopies) {
+    MachineOperand *Src2 = TII->getNamedOperand(*MI, AMDGPU::OpName::src2);
+    if (!Src2)
+      continue;
+    if (!Src2->isReg())
+      continue;
+    auto Src2Reg = Src2->getReg();
+    SmallVector<MachineInstr *, 4> DefInstrs;
+    for (auto &DefMI : DAG.MRI.def_instructions(Src2Reg))
+      DefInstrs.push_back(&DefMI);
+
+    DenseMap<Register, MachineInstr *> NewCopies;
+    for (auto DefMI : DefInstrs) {
+      for (unsigned OpNo = 0; OpNo < DefMI->getNumOperands(); OpNo++) {
+        auto &TheOp = DefMI->getOperand(OpNo);
+        if (!TheOp.isReg() || !TheOp.isDef())
+          continue;
+        if (TheOp.getReg() != Src2Reg)
+          continue;
+
+        auto RequiredRC = DefMI->getRegClassConstraint(OpNo, DAG.TII, DAG.TRI);
+
+        if (!RequiredRC || SRI->hasAGPRs(RequiredRC))
+          continue;
+
+        Register SrcVGPR;
+        if (!NewCopies.contains(Src2Reg)) {
+          Register SrcVGPR = DAG.MRI.createVirtualRegister(
+              SRI->getEquivalentVGPRClass(DAG.MRI.getRegClass(Src2Reg)));
+
+          // Insert copy near the def to avoid inserting inside loops.
+          MachineInstrBuilder VGPRCopy =
+              BuildMI(*DefMI->getParent(), ++DefMI->getIterator(),
+                      DefMI->getDebugLoc(), TII->get(TargetOpcode::COPY))
+                  .addDef(Src2Reg, 0, 0)
+                  .addUse(SrcVGPR, 0, 0);
+
+          NewCopies[Src2Reg] = VGPRCopy;
+        }
+
+        SrcVGPR = NewCopies[Src2Reg]->getOperand(1).getReg();
+        TheOp.setReg(SrcVGPR);
+      }
+    }
+
+    if (NewCopies.contains(Src2Reg)) {
+      DAG.LIS->InsertMachineInstrInMaps(*NewCopies[Src2Reg]);
+      DAG.LIS->removeInterval(Src2Reg);
+      DAG.LIS->createAndComputeVirtRegInterval(Src2Reg);
+      DAG.LIS->createAndComputeVirtRegInterval(
+          NewCopies[Src2Reg]->getOperand(1).getReg());
+    }
+  }
+
+  // Live-ins may have been modified by the cross-RC copies.
+  RegionPressureMap LiveInUpdater(&DAG, false);
+  LiveInUpdater.buildLiveRegMap();
+
+  for (unsigned RegionIdx = 0; RegionIdx < DAG.Regions.size(); RegionIdx++)
+    DAG.LiveIns[RegionIdx] = LiveInUpdater.getLiveRegsForRegionIdx(RegionIdx);
+
+  return true;
+}
+
 bool UnclusteredHighRPStage::initGCNSchedStage() {
   if (DisableUnclusterHighRP)
     return false;
@@ -1338,6 +1587,9 @@ void GCNSchedStage::checkScheduling() {
     DAG.RegionsWithExcessRP[RegionIdx] = true;
   }
 
+  if (PressureAfter.getArchVGPRNum() > ST.getAddressableNumArchVGPRs())
+    DAG.RegionsWithExcessArchVGPR[RegionIdx] = true;
+
   // Revert if this region's schedule would cause a drop in occupancy or
   // spilling.
   if (shouldRevertScheduling(WavesAfter)) {
@@ -1641,6 +1893,38 @@ void GCNSchedStage::revertScheduling() {
   DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
 }
 
+bool RewriteScheduleStage::isRewriteCandidate(MachineInstr *MI) const {
+
+  if (!static_cast<const SIInstrInfo *>(DAG.TII)->isMAI(*MI))
+    return false;
+  return AMDGPU::getMFMASrcCVDstAGPROp(MI->getOpcode()) != -1;
+}
+
+const TargetRegisterClass *
+RewriteScheduleStage::recomputeRegClassExceptRewritable(
+    Register Reg, const TargetRegisterClass *OldRC,
+    const TargetRegisterClass *NewRC) const {
+
+  // Accumulate constraints from all uses.
+  for (MachineOperand &MO : DAG.MRI.reg_nodbg_operands(Reg)) {
+    // Apply the effect of the given operand to NewRC.
+    MachineInstr *MI = MO.getParent();
+    // We can swap the classes of dst + src2 as a pair to AGPR, so ignore the
+    // effects of rewrite candidates. It just so happens that we can use either
+    // AGPR or VGPR in src0/src1, so don't bother checking the constraint
+    // effects of the individual operands.
+    if (isRewriteCandidate(MI))
+      continue;
+
+    unsigned OpNo = &MO - &MI->getOperand(0);
+    NewRC = MI->getRegClassConstraintEffect(OpNo, NewRC, DAG.TII, DAG.TRI);
+    if (!NewRC || NewRC == OldRC)
+      return nullptr;
+  }
+
+  return NewRC;
+}
+
 bool PreRARematStage::allUsesAvailableAt(const MachineInstr *InstToRemat,
                                          SlotIndex OriginalIdx,
                                          SlotIndex RematIdx) const {
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index 94cd795bbc8f6..af264d062ce5a 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -28,11 +28,12 @@ class GCNSchedStage;
 
 enum class GCNSchedStageID : unsigned {
   OccInitialSchedule = 0,
-  UnclusteredHighRPReschedule = 1,
-  ClusteredLowOccupancyReschedule = 2,
-  PreRARematerialize = 3,
-  ILPInitialSchedule = 4,
-  MemoryClauseInitialSchedule = 5
+  RewriteSchedule = 1,
+  UnclusteredHighRPReschedule = 2,
+  ClusteredLowOccupancyReschedule = 3,
+  PreRARematerialize = 4,
+  ILPInitialSchedule = 5,
+  MemoryClauseInitialSchedule = 6
 };
 
 #ifndef NDEBUG
@@ -224,6 +225,7 @@ using RegionBoundaries =
 class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
   friend class GCNSchedStage;
   friend class OccInitialScheduleStage;
+  friend class RewriteScheduleStage;
   friend class UnclusteredHighRPStage;
   friend class ClusteredLowOccStage;
   friend class PreRARematStage;
@@ -250,6 +252,11 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
   // limit. Register pressure in these regions usually will result in spilling.
   BitVector RegionsWithExcessRP;
 
+  // Record regions whose ArchVGPR register pressure exceeds the addressable
+  // ArchVGPR limit. Register pressure in these regions will usually result in
+  // spilling.
+  BitVector RegionsWithExcessArchVGPR;
+
   // Regions that has the same occupancy as the latest MinOccupancy
   BitVector RegionsWithMinOcc;
 
@@ -401,6 +408,24 @@ class OccInitialScheduleStage : public GCNSchedStage {
       : GCNSchedStage(StageID, DAG) {}
 };
 
+class RewriteScheduleStage : public GCNSchedStage {
+private:
+  const TargetRegisterClass *
+  recomputeRegClassExceptRewritable(Register Reg,
+                                    const TargetRegisterClass *OldRC,
+                                    const TargetRegisterClass *NewRC) const;
+
+  bool isRewriteCandidate(MachineInstr *MI) const;
+
+  MachineCycleInfo CI;
+
+public:
+  bool initGCNSchedStage() override;
+
+  RewriteScheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
+      : GCNSchedStage(StageID, DAG) {}
+};
+
 class UnclusteredHighRPStage : public GCNSchedStage {
 private:
   // Save the initial occupancy before starting this stage.


