[llvm] [AMDGPU] Optionally Use GCNRPTrackers during scheduling (PR #93090)
Jeffrey Byrnes via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 18 12:02:49 PDT 2024
https://github.com/jrbyrnes updated https://github.com/llvm/llvm-project/pull/93090
>From ae725b04198dda65470646825aa1705bd9351eb4 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 21 May 2024 12:55:07 -0700
Subject: [PATCH 01/11] [AMDGPU] NFC: Add BBLiveOutMap & LiveOut Cache
Change-Id: I63cfd44e635cc4bee0e6780ca43b692c46e940b7
---
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 47 +++++++++++++++++++--
llvm/lib/Target/AMDGPU/GCNSchedStrategy.h | 31 ++++++++++++++
2 files changed, 75 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 217279211531b..c5f0d9910861b 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -58,6 +58,11 @@ static cl::opt<bool>
"Wave Limited (amdgpu-limit-wave-threshold)."),
cl::init(false));
+static cl::opt<bool> GCNTrackers(
+ "amdgpu-use-amdgpu-trackers", cl::Hidden,
+ cl::desc("Use the AMDGPU specific RPTrackers during scheduling"),
+ cl::init(false));
+
const unsigned ScheduleMetrics::ScaleFactor = 100;
GCNSchedStrategy::GCNSchedStrategy(const MachineSchedContext *C)
@@ -571,7 +576,8 @@ GCNScheduleDAGMILive::GCNScheduleDAGMILive(
MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S)
: ScheduleDAGMILive(C, std::move(S)), ST(MF.getSubtarget<GCNSubtarget>()),
MFI(*MF.getInfo<SIMachineFunctionInfo>()),
- StartingOccupancy(MFI.getOccupancy()), MinOccupancy(StartingOccupancy) {
+ StartingOccupancy(MFI.getOccupancy()), MinOccupancy(StartingOccupancy),
+ RegionLiveOuts(this, /*IsLiveOut=*/true) {
LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
if (RelaxedOcc) {
@@ -613,6 +619,14 @@ GCNScheduleDAGMILive::getRealRegPressure(unsigned RegionIdx) const {
return RPTracker.moveMaxPressure();
}
+static MachineInstr *getLastMIForRegion(MachineBasicBlock::iterator RegionBegin,
+ MachineBasicBlock::iterator RegionEnd) {
+ auto REnd = RegionEnd == RegionBegin->getParent()->end()
+ ? std::prev(RegionEnd)
+ : RegionEnd;
+ return &*skipDebugInstructionsBackward(REnd, RegionBegin);
+}
+
void GCNScheduleDAGMILive::computeBlockPressure(unsigned RegionIdx,
const MachineBasicBlock *MBB) {
GCNDownwardRPTracker RPTracker(*LIS);
@@ -700,7 +714,31 @@ GCNScheduleDAGMILive::getBBLiveInMap() const {
++I;
} while (I != E && I->first->getParent() == BB);
} while (I != E);
- return getLiveRegMap(BBStarters, false /*After*/, *LIS);
+ return getLiveRegMap(BBStarters, /*After=*/false, *LIS);
+}
+
+DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet>
+GCNScheduleDAGMILive::getBBLiveOutMap() const {
+ assert(!Regions.empty());
+ std::vector<MachineInstr *> BBEnders;
+ BBEnders.reserve(Regions.size());
+ for (auto &[RegionBegin, RegionEnd] : reverse(Regions))
+ BBEnders.push_back(getLastMIForRegion(RegionBegin, RegionEnd));
+
+  return getLiveRegMap(BBEnders, /*After=*/true, *LIS);
+}
+
+void RegionPressureMap::buildLiveRegMap() {
+ IdxToInstruction.clear();
+
+ BBLiveRegMap = IsLiveOut ? DAG->getBBLiveOutMap() : DAG->getBBLiveInMap();
+ for (unsigned I = 0; I < DAG->Regions.size(); I++) {
+ MachineInstr *RegionKey =
+ IsLiveOut
+ ? getLastMIForRegion(DAG->Regions[I].first, DAG->Regions[I].second)
+ : &*DAG->Regions[I].first;
+ IdxToInstruction[I] = RegionKey;
+ }
}
void GCNScheduleDAGMILive::finalizeSchedule() {
@@ -726,8 +764,11 @@ void GCNScheduleDAGMILive::finalizeSchedule() {
void GCNScheduleDAGMILive::runSchedStages() {
LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");
- if (!Regions.empty())
+ if (!Regions.empty()) {
BBLiveInMap = getBBLiveInMap();
+ if (GCNTrackers)
+ RegionLiveOuts.buildLiveRegMap();
+ }
GCNSchedStrategy &S = static_cast<GCNSchedStrategy &>(*SchedImpl);
while (S.advanceStage()) {
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index f0aea2bc4ab86..fd753dad65ad0 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -163,6 +163,32 @@ inline raw_ostream &operator<<(raw_ostream &OS, const ScheduleMetrics &Sm) {
return OS;
}
+class GCNScheduleDAGMILive;
+class RegionPressureMap {
+ GCNScheduleDAGMILive *DAG;
+  // The live in/out register set for each region, indexed by the first or
+  // last MI in the region before scheduling.
+ DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> BBLiveRegMap;
+  // The mapping of RegionIdx to key instruction
+ DenseMap<unsigned, MachineInstr *> IdxToInstruction;
+ // Whether we are calculating LiveOuts or LiveIns
+ bool IsLiveOut;
+
+public:
+ RegionPressureMap() {}
+ RegionPressureMap(GCNScheduleDAGMILive *GCNDAG, bool LiveOut)
+ : DAG(GCNDAG), IsLiveOut(LiveOut) {}
+ // Build the Instr->LiveReg and RegionIdx->Instr maps
+ void buildLiveRegMap();
+
+ // Retrieve the LiveReg for a given RegionIdx
+ GCNRPTracker::LiveRegSet &getLiveRegsForRegionIdx(unsigned RegionIdx) {
+ assert(IdxToInstruction.find(RegionIdx) != IdxToInstruction.end());
+ MachineInstr *Key = IdxToInstruction[RegionIdx];
+ return BBLiveRegMap[Key];
+ }
+};
+
class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
friend class GCNSchedStage;
friend class OccInitialScheduleStage;
@@ -170,6 +196,7 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
friend class ClusteredLowOccStage;
friend class PreRARematStage;
friend class ILPInitialScheduleStage;
+ friend class RegionPressureMap;
const GCNSubtarget &ST;
@@ -215,6 +242,10 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> getBBLiveInMap() const;
+ DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> getBBLiveOutMap() const;
+
+ RegionPressureMap RegionLiveOuts;
+
// Return current region pressure.
GCNRegPressure getRealRegPressure(unsigned RegionIdx) const;
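For reference, a minimal usage sketch of the new RegionPressureMap (illustrative only: DAG and RegionIdx below are placeholders for the enclosing GCNScheduleDAGMILive and a region index; in the patch the actual instance is the RegionLiveOuts member built in runSchedStages()):

  // Build once the regions are known, then query per region.
  RegionPressureMap RegionLiveOuts(DAG, /*LiveOut=*/true);
  RegionLiveOuts.buildLiveRegMap();   // Instr->LiveReg and RegionIdx->Instr maps
  GCNRPTracker::LiveRegSet &LiveOuts =
      RegionLiveOuts.getLiveRegsForRegionIdx(RegionIdx); // live regs at the region's last MI

The extra RegionIdx->Instr map lets callers query by region index instead of tracking the region's boundary instruction themselves.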
>From 88d46b3ac1a154675dcbde3671c3fa2549d3a4e4 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 21 May 2024 13:34:59 -0700
Subject: [PATCH 02/11] [AMDGPU] NFC: Provide RPTracker interface for external
iterators
Change-Id: I79b54722e6e858961486248d94766c3f3c161160
---
llvm/lib/Target/AMDGPU/GCNRegPressure.cpp | 77 +++++++++++++-------
llvm/lib/Target/AMDGPU/GCNRegPressure.h | 88 ++++++++++++++++-------
2 files changed, 113 insertions(+), 52 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index 5c394e6d6296d..a2d76807d3a71 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -343,21 +343,23 @@ void GCNRPTracker::reset(const MachineInstr &MI,
MaxPressure = CurPressure = getRegPressure(*MRI, LiveRegs);
}
-////////////////////////////////////////////////////////////////////////////////
-// GCNUpwardRPTracker
-
-void GCNUpwardRPTracker::reset(const MachineRegisterInfo &MRI_,
- const LiveRegSet &LiveRegs_) {
+void GCNRPTracker::reset(const MachineRegisterInfo &MRI_,
+ const LiveRegSet &LiveRegs_) {
MRI = &MRI_;
LiveRegs = LiveRegs_;
LastTrackedMI = nullptr;
MaxPressure = CurPressure = getRegPressure(MRI_, LiveRegs_);
}
-void GCNUpwardRPTracker::recede(const MachineInstr &MI) {
+////////////////////////////////////////////////////////////////////////////////
+// GCNUpwardRPTracker
+
+void GCNUpwardRPTracker::recede(const MachineInstr &MI,
+ bool UseInternalIterator) {
assert(MRI && "call reset first");
- LastTrackedMI = &MI;
+ if (UseInternalIterator)
+ LastTrackedMI = &MI;
if (MI.isDebugInstr())
return;
@@ -430,28 +432,44 @@ bool GCNDownwardRPTracker::reset(const MachineInstr &MI,
return true;
}
-bool GCNDownwardRPTracker::advanceBeforeNext() {
+bool GCNDownwardRPTracker::advanceBeforeNext(MachineInstr *MI,
+ bool UseInternalIterator,
+ LiveIntervals *TheLIS) {
assert(MRI && "call reset first");
- if (!LastTrackedMI)
- return NextMI == MBBEnd;
-
- assert(NextMI == MBBEnd || !NextMI->isDebugInstr());
+ SlotIndex SI;
+ LiveIntervals *CurrLIS;
+ MachineInstr *CurrMI;
+ if (UseInternalIterator) {
+ if (!LastTrackedMI)
+ return NextMI == MBBEnd;
+
+ assert(NextMI == MBBEnd || !NextMI->isDebugInstr());
+ CurrLIS = const_cast<LiveIntervals *>(&LIS);
+ CurrMI = const_cast<MachineInstr *>(LastTrackedMI);
+
+ SI = NextMI == MBBEnd
+ ? CurrLIS->getInstructionIndex(*LastTrackedMI).getDeadSlot()
+ : CurrLIS->getInstructionIndex(*NextMI).getBaseIndex();
+  } else { // !UseInternalIterator
+ CurrLIS = TheLIS;
+ SI = CurrLIS->getInstructionIndex(*MI).getBaseIndex();
+ CurrMI = MI;
+ }
- SlotIndex SI = NextMI == MBBEnd
- ? LIS.getInstructionIndex(*LastTrackedMI).getDeadSlot()
- : LIS.getInstructionIndex(*NextMI).getBaseIndex();
assert(SI.isValid());
// Remove dead registers or mask bits.
SmallSet<Register, 8> SeenRegs;
- for (auto &MO : LastTrackedMI->operands()) {
+ for (auto &MO : CurrMI->operands()) {
if (!MO.isReg() || !MO.getReg().isVirtual())
continue;
if (MO.isUse() && !MO.readsReg())
continue;
+ if (!UseInternalIterator && MO.isDef())
+ continue;
if (!SeenRegs.insert(MO.getReg()).second)
continue;
- const LiveInterval &LI = LIS.getInterval(MO.getReg());
+ const LiveInterval &LI = CurrLIS->getInterval(MO.getReg());
if (LI.hasSubRanges()) {
auto It = LiveRegs.end();
for (const auto &S : LI.subranges()) {
@@ -481,15 +499,21 @@ bool GCNDownwardRPTracker::advanceBeforeNext() {
LastTrackedMI = nullptr;
- return NextMI == MBBEnd;
+ return UseInternalIterator && (NextMI == MBBEnd);
}
-void GCNDownwardRPTracker::advanceToNext() {
- LastTrackedMI = &*NextMI++;
- NextMI = skipDebugInstructionsForward(NextMI, MBBEnd);
+void GCNDownwardRPTracker::advanceToNext(MachineInstr *MI,
+ bool UseInternalIterator) {
+ if (UseInternalIterator) {
+ LastTrackedMI = &*NextMI++;
+ NextMI = skipDebugInstructionsForward(NextMI, MBBEnd);
+ }
+
+ MachineInstr *CurrMI =
+ UseInternalIterator ? const_cast<MachineInstr *>(LastTrackedMI) : MI;
// Add new registers or mask bits.
- for (const auto &MO : LastTrackedMI->all_defs()) {
+ for (const auto &MO : CurrMI->all_defs()) {
Register Reg = MO.getReg();
if (!Reg.isVirtual())
continue;
@@ -502,11 +526,12 @@ void GCNDownwardRPTracker::advanceToNext() {
MaxPressure = max(MaxPressure, CurPressure);
}
-bool GCNDownwardRPTracker::advance() {
- if (NextMI == MBBEnd)
+bool GCNDownwardRPTracker::advance(MachineInstr *MI, bool UseInternalIterator,
+ LiveIntervals *TheLIS) {
+ if (UseInternalIterator && NextMI == MBBEnd)
return false;
- advanceBeforeNext();
- advanceToNext();
+ advanceBeforeNext(MI, UseInternalIterator, TheLIS);
+ advanceToNext(MI, UseInternalIterator);
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index 752f53752fa68..6ae20dad8e9a3 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -143,6 +143,9 @@ inline GCNRegPressure operator-(const GCNRegPressure &P1,
return Diff;
}
+///////////////////////////////////////////////////////////////////////////////
+// GCNRPTracker
+
class GCNRPTracker {
public:
using LiveRegSet = DenseMap<unsigned, LaneBitmask>;
@@ -160,6 +163,9 @@ class GCNRPTracker {
bool After);
public:
+ // reset tracker and set live register set to the specified value.
+ void reset(const MachineRegisterInfo &MRI_, const LiveRegSet &LiveRegs_);
+
// live regs for the current state
const decltype(LiveRegs) &getLiveRegs() const { return LiveRegs; }
const MachineInstr *getLastTrackedMI() const { return LastTrackedMI; }
@@ -176,34 +182,38 @@ class GCNRPTracker {
GCNRPTracker::LiveRegSet getLiveRegs(SlotIndex SI, const LiveIntervals &LIS,
const MachineRegisterInfo &MRI);
+////////////////////////////////////////////////////////////////////////////////
+// GCNUpwardRPTracker
+
class GCNUpwardRPTracker : public GCNRPTracker {
public:
GCNUpwardRPTracker(const LiveIntervals &LIS_) : GCNRPTracker(LIS_) {}
- // reset tracker and set live register set to the specified value.
- void reset(const MachineRegisterInfo &MRI_, const LiveRegSet &LiveRegs_);
+ using GCNRPTracker::reset;
- // reset tracker at the specified slot index.
+ /// reset tracker at the specified slot index \p SI.
void reset(const MachineRegisterInfo &MRI, SlotIndex SI) {
- reset(MRI, llvm::getLiveRegs(SI, LIS, MRI));
+ GCNRPTracker::reset(MRI, llvm::getLiveRegs(SI, LIS, MRI));
}
- // reset tracker to the end of the MBB.
+ /// reset tracker to the end of the \p MBB.
void reset(const MachineBasicBlock &MBB) {
reset(MBB.getParent()->getRegInfo(),
LIS.getSlotIndexes()->getMBBEndIdx(&MBB));
}
- // reset tracker to the point just after MI (in program order).
+ /// reset tracker to the point just after \p MI (in program order).
void reset(const MachineInstr &MI) {
reset(MI.getMF()->getRegInfo(), LIS.getInstructionIndex(MI).getDeadSlot());
}
- // move to the state just before the MI (in program order).
- void recede(const MachineInstr &MI);
+  /// Move to the state of RP just before \p MI. If \p UseInternalIterator
+ /// is set, also update the internal iterators. Setting \p UseInternalIterator
+ /// to false allows for an externally managed iterator / program order.
+ void recede(const MachineInstr &MI, bool UseInternalIterator = true);
- // checks whether the tracker's state after receding MI corresponds
- // to reported by LIS.
+  /// \returns whether the tracker's state after receding \p MI corresponds
+  /// to that reported by LIS.
bool isValid() const;
const GCNRegPressure &getMaxPressure() const { return MaxPressure; }
@@ -217,6 +227,9 @@ class GCNUpwardRPTracker : public GCNRPTracker {
}
};
+////////////////////////////////////////////////////////////////////////////////
+// GCNDownwardRPTracker
+
class GCNDownwardRPTracker : public GCNRPTracker {
// Last position of reset or advanceBeforeNext
MachineBasicBlock::const_iterator NextMI;
@@ -226,34 +239,57 @@ class GCNDownwardRPTracker : public GCNRPTracker {
public:
GCNDownwardRPTracker(const LiveIntervals &LIS_) : GCNRPTracker(LIS_) {}
+ using GCNRPTracker::reset;
+
MachineBasicBlock::const_iterator getNext() const { return NextMI; }
- // Return MaxPressure and clear it.
+  /// \returns MaxPressure and clears it.
GCNRegPressure moveMaxPressure() {
auto Res = MaxPressure;
MaxPressure.clear();
return Res;
}
- // Reset tracker to the point before the MI
- // filling live regs upon this point using LIS.
- // Returns false if block is empty except debug values.
+  /// Reset tracker to the point before \p MI, filling \p LiveRegs at this
+  /// point using LIS.
+  /// \returns false if the block is empty except for debug values.
bool reset(const MachineInstr &MI, const LiveRegSet *LiveRegs = nullptr);
- // Move to the state right before the next MI or after the end of MBB.
- // Returns false if reached end of the block.
- bool advanceBeforeNext();
-
- // Move to the state at the MI, advanceBeforeNext has to be called first.
- void advanceToNext();
-
- // Move to the state at the next MI. Returns false if reached end of block.
- bool advance();
-
- // Advance instructions until before End.
+ /// Move to the state right before the next MI or after the end of MBB.
+  /// \returns false if the end of the block was reached.
+ /// If \p UseInternalIterator is true, then internal iterators are used and
+ /// set to process in program order. If \p UseInternalIterator is false, then
+ /// it is assumed that the tracker is using an externally managed iterator,
+ /// and advance* calls will not update the state of the iterator. In such
+ /// cases, the tracker will move to the state right before the provided \p MI
+ /// and use the provided \p TheLIS for RP calculations.
+ bool advanceBeforeNext(MachineInstr *MI = nullptr,
+ bool UseInternalIterator = true,
+ LiveIntervals *TheLIS = nullptr);
+
+ /// Move to the state at the MI, advanceBeforeNext has to be called first.
+ /// If \p UseInternalIterator is true, then internal iterators are used and
+ /// set to process in program order. If \p UseInternalIterator is false, then
+ /// it is assumed that the tracker is using an externally managed iterator,
+ /// and advance* calls will not update the state of the iterator. In such
+  /// cases, the tracker will move to the state at the provided \p MI.
+ void advanceToNext(MachineInstr *MI = nullptr,
+ bool UseInternalIterator = true);
+
+  /// Move to the state at the next MI. \returns false if reached end of
+ /// block. If \p UseInternalIterator is true, then internal iterators are used
+ /// and set to process in program order. If \p UseInternalIterator is false,
+ /// then it is assumed that the tracker is using an externally managed
+ /// iterator, and advance* calls will not update the state of the iterator. In
+ /// such cases, the tracker will move to the state right before the provided
+ /// \p MI and use the provided \p TheLIS for RP calculations.
+ bool advance(MachineInstr *MI = nullptr, bool UseInternalIterator = true,
+ LiveIntervals *TheLIS = nullptr);
+
+ /// Advance instructions until before \p End.
bool advance(MachineBasicBlock::const_iterator End);
- // Reset to Begin and advance to End.
+ /// Reset to \p Begin and advance to \p End.
bool advance(MachineBasicBlock::const_iterator Begin,
MachineBasicBlock::const_iterator End,
const LiveRegSet *LiveRegsCopy = nullptr);
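To make the new UseInternalIterator mode concrete, here is a minimal sketch of the two ways the downward tracker can now be driven (LIS, MRI, MBB, LiveIns, and SchedulerOrder are assumed to come from the caller; this is not part of the patch):

  GCNDownwardRPTracker RPT(LIS);

  // 1) Internal iterator (existing behavior): walk the block in program order.
  RPT.reset(*MBB.begin());
  while (RPT.advance())
    ; // CurPressure/MaxPressure follow program order

  // 2) External iterator (new): seed from a precomputed live-reg set, then
  //    feed instructions in whatever order the caller visits them, passing
  //    LIS explicitly.
  RPT.reset(MRI, LiveIns);
  for (MachineInstr *MI : SchedulerOrder)
    RPT.advance(MI, /*UseInternalIterator=*/false, &LIS);
  GCNRegPressure RP = RPT.getPressure();

The second mode is what the scheduler strategy uses in the later patches, since the machine scheduler visits instructions in its own order rather than program order.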
>From e73745054592b5fc15671e6c09934cb9f90820f7 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 21 May 2024 18:04:25 -0700
Subject: [PATCH 03/11] [AMDGPU] Optionally Use AMDGPU RPTrackers during
scheduling
Change-Id: I6ae56149c1eb49ea85362267174cc6274c416330
---
.../Target/AMDGPU/GCNIterativeScheduler.cpp | 2 +-
llvm/lib/Target/AMDGPU/GCNRegPressure.h | 1 -
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 88 ++++++++++++++++---
llvm/lib/Target/AMDGPU/GCNSchedStrategy.h | 19 +++-
4 files changed, 93 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index aebfe154b3139..ccee5db9a3bb6 100644
--- a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -480,7 +480,7 @@ void GCNIterativeScheduler::scheduleLegacyMaxOccupancy(
LLVM_DEBUG(dbgs() << "Scheduling using default scheduler, "
"target occupancy = "
<< TgtOcc << '\n');
- GCNMaxOccupancySchedStrategy LStrgy(Context);
+ GCNMaxOccupancySchedStrategy LStrgy(Context, /*IsLegacyScheduler*/ true);
unsigned FinalOccupancy = std::min(Occ, MFI->getOccupancy());
for (int I = 0; I < NumPasses; ++I) {
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index 6ae20dad8e9a3..671eae2e3c4ad 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -165,7 +165,6 @@ class GCNRPTracker {
public:
// reset tracker and set live register set to the specified value.
void reset(const MachineRegisterInfo &MRI_, const LiveRegSet &LiveRegs_);
-
// live regs for the current state
const decltype(LiveRegs) &getLiveRegs() const { return LiveRegs; }
const MachineInstr *getLastTrackedMI() const { return LastTrackedMI; }
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index c5f0d9910861b..d709fe3676ddd 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -67,6 +67,7 @@ const unsigned ScheduleMetrics::ScaleFactor = 100;
GCNSchedStrategy::GCNSchedStrategy(const MachineSchedContext *C)
: GenericScheduler(C), TargetOccupancy(0), MF(nullptr),
+ TheTracker(*C->LIS), TheUpwardTracker(*C->LIS),
HasHighPressure(false) {}
void GCNSchedStrategy::initialize(ScheduleDAGMI *DAG) {
@@ -156,14 +157,37 @@ static bool canUsePressureDiffs(const SUnit &SU) {
static void getRegisterPressures(bool AtTop,
const RegPressureTracker &RPTracker, SUnit *SU,
std::vector<unsigned> &Pressure,
- std::vector<unsigned> &MaxPressure) {
+ std::vector<unsigned> &MaxPressure,
+ GCNDownwardRPTracker &TheTracker,
+ GCNUpwardRPTracker &TheUpwardTracker,
+ ScheduleDAGMI *DAG) {
// getDownwardPressure() and getUpwardPressure() make temporary changes to
// the tracker, so we need to pass those function a non-const copy.
RegPressureTracker &TempTracker = const_cast<RegPressureTracker &>(RPTracker);
- if (AtTop)
- TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
- else
- TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
+ if (!GCNTrackers) {
+ if (AtTop)
+ TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
+ else
+ TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
+ } else {
+ if (AtTop) {
+ GCNDownwardRPTracker TempTopTracker(TheTracker);
+ auto MI = SU->getInstr();
+ TempTopTracker.advance(MI, true, DAG->getLIS());
+
+ Pressure[AMDGPU::RegisterPressureSets::SReg_32] = TempTopTracker.getPressure().getSGPRNum();
+ Pressure[AMDGPU::RegisterPressureSets::VGPR_32] = TempTopTracker.getPressure().getVGPRNum(false);
+ }
+
+ else {
+ GCNUpwardRPTracker TempBotTracker(TheUpwardTracker);
+ auto MI = SU->getInstr();
+ TempBotTracker.recede(*MI, true);
+
+ Pressure[AMDGPU::RegisterPressureSets::SReg_32] = TempBotTracker.getPressure().getSGPRNum();
+ Pressure[AMDGPU::RegisterPressureSets::VGPR_32] = TempBotTracker.getPressure().getVGPRNum(false);
+ }
+ }
}
void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
@@ -192,8 +216,8 @@ void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
//
// In EXPENSIVE_CHECKS, we always query RPTracker to verify the results of
// PressureDiffs.
- if (AtTop || !canUsePressureDiffs(*SU)) {
- getRegisterPressures(AtTop, RPTracker, SU, Pressure, MaxPressure);
+ if (AtTop || !canUsePressureDiffs(*SU) || GCNTrackers) {
+ getRegisterPressures(AtTop, RPTracker, SU, Pressure, MaxPressure, TheTracker, TheUpwardTracker, DAG);
} else {
// Reserve 4 slots.
Pressure.resize(4, 0);
@@ -211,7 +235,7 @@ void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
#ifdef EXPENSIVE_CHECKS
std::vector<unsigned> CheckPressure, CheckMaxPressure;
- getRegisterPressures(AtTop, RPTracker, SU, CheckPressure, CheckMaxPressure);
+ getRegisterPressures(AtTop, RPTracker, SU, CheckPressure, CheckMaxPressure,TheTracker,TheUpwardTracker, DAG);
if (Pressure[AMDGPU::RegisterPressureSets::SReg_32] !=
CheckPressure[AMDGPU::RegisterPressureSets::SReg_32] ||
Pressure[AMDGPU::RegisterPressureSets::VGPR_32] !=
@@ -299,8 +323,16 @@ void GCNSchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
unsigned SGPRPressure = 0;
unsigned VGPRPressure = 0;
if (DAG->isTrackingPressure()) {
- SGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
- VGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
+ SGPRPressure =
+ GCNTrackers
+ ? (Zone.isTop() ? TheTracker.getPressure().getSGPRNum()
+ : TheUpwardTracker.getPressure().getSGPRNum())
+ : Pressure[AMDGPU::RegisterPressureSets::SReg_32];
+ VGPRPressure =
+ GCNTrackers
+ ? (Zone.isTop() ? TheTracker.getPressure().getVGPRNum(false)
+ : TheUpwardTracker.getPressure().getVGPRNum(false))
+ : Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
}
ReadyQueue &Q = Zone.Available;
for (SUnit *SU : Q) {
@@ -449,6 +481,16 @@ SUnit *GCNSchedStrategy::pickNode(bool &IsTopNode) {
return SU;
}
+void GCNSchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
+ if (GCNTrackers) {
+ MachineInstr *MI = SU->getInstr();
+ IsTopNode ? (void)TheTracker.advance(MI, true, DAG->getLIS())
+ : TheUpwardTracker.recede(*MI, true);
+ }
+
+ return GenericScheduler::schedNode(SU, IsTopNode);
+}
+
GCNSchedStageID GCNSchedStrategy::getCurrentStage() {
assert(CurrentStage && CurrentStage != SchedStages.end());
return *CurrentStage;
@@ -475,12 +517,13 @@ GCNSchedStageID GCNSchedStrategy::getNextStage() const {
}
GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
- const MachineSchedContext *C)
+ const MachineSchedContext *C, bool IsLegacyScheduler)
: GCNSchedStrategy(C) {
SchedStages.push_back(GCNSchedStageID::OccInitialSchedule);
SchedStages.push_back(GCNSchedStageID::UnclusteredHighRPReschedule);
SchedStages.push_back(GCNSchedStageID::ClusteredLowOccupancyReschedule);
SchedStages.push_back(GCNSchedStageID::PreRARematerialize);
+ GCNTrackers = GCNTrackers & !IsLegacyScheduler;
}
GCNMaxILPSchedStrategy::GCNMaxILPSchedStrategy(const MachineSchedContext *C)
@@ -786,6 +829,20 @@ void GCNScheduleDAGMILive::runSchedStages() {
continue;
}
+ if (GCNTrackers) {
+ GCNDownwardRPTracker *TheTracker = S.getTracker();
+ GCNUpwardRPTracker *TheUpwardTracker = S.getUpwardTracker();
+ GCNRPTracker::LiveRegSet *RegionLiveIns = &LiveIns[Stage->getRegionIdx()];
+
+ reinterpret_cast<GCNRPTracker *>(TheTracker)->reset(
+ Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
+ *RegionLiveIns);
+ reinterpret_cast<GCNRPTracker *>(TheUpwardTracker)->reset(
+ Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
+ RegionLiveOuts.getLiveRegsForRegionIdx(Stage->getRegionIdx()));
+
+ }
+
ScheduleDAGMILive::schedule();
Stage->finalizeGCNRegion();
}
@@ -1056,6 +1113,7 @@ void GCNSchedStage::finalizeGCNRegion() {
void GCNSchedStage::checkScheduling() {
// Check the results of scheduling.
PressureAfter = DAG.getRealRegPressure(RegionIdx);
+
LLVM_DEBUG(dbgs() << "Pressure after scheduling: " << print(PressureAfter));
LLVM_DEBUG(dbgs() << "Region: " << RegionIdx << ".\n");
@@ -1607,9 +1665,6 @@ bool PreRARematStage::sinkTriviallyRematInsts(const GCNSubtarget &ST,
MachineInstr *MI = Entry.first;
MachineInstr *OldMI = Entry.second;
- // Remove OldMI from BBLiveInMap since we are sinking it from its MBB.
- DAG.BBLiveInMap.erase(OldMI);
-
// Remove OldMI and update LIS
Register Reg = MI->getOperand(0).getReg();
LIS->RemoveMachineInstrFromMaps(*OldMI);
@@ -1627,6 +1682,11 @@ bool PreRARematStage::sinkTriviallyRematInsts(const GCNSubtarget &ST,
DAG.Regions = NewRegions;
DAG.RescheduleRegions = NewRescheduleRegions;
+ DAG.BBLiveInMap = DAG.getBBLiveInMap();
+
+ if (GCNTrackers)
+ DAG.RegionLiveOuts.buildLiveRegMap();
+
SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
MFI.increaseOccupancy(MF, ++DAG.MinOccupancy);
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index fd753dad65ad0..554df736177fa 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -70,6 +70,12 @@ class GCNSchedStrategy : public GenericScheduler {
// Pointer to the current SchedStageID.
SmallVectorImpl<GCNSchedStageID>::iterator CurrentStage = nullptr;
+ // GCN RP Tracker for top-down scheduling
+ mutable GCNDownwardRPTracker TheTracker;
+
+  // GCN RP Tracker for bottom-up scheduling
+ mutable GCNUpwardRPTracker TheUpwardTracker;
+
public:
// schedule() have seen register pressure over the critical limits and had to
// track register pressure for actual scheduling heuristics.
@@ -102,6 +108,8 @@ class GCNSchedStrategy : public GenericScheduler {
SUnit *pickNode(bool &IsTopNode) override;
+ void schedNode(SUnit *SU, bool IsTopNode) override;
+
void initialize(ScheduleDAGMI *DAG) override;
unsigned getTargetOccupancy() { return TargetOccupancy; }
@@ -116,13 +124,19 @@ class GCNSchedStrategy : public GenericScheduler {
bool hasNextStage() const;
GCNSchedStageID getNextStage() const;
+
+ GCNDownwardRPTracker *getTracker() { return &TheTracker; }
+
+ GCNUpwardRPTracker *getUpwardTracker() { return &TheUpwardTracker; }
+
};
/// The goal of this scheduling strategy is to maximize kernel occupancy (i.e.
/// maximum number of waves per simd).
class GCNMaxOccupancySchedStrategy final : public GCNSchedStrategy {
public:
- GCNMaxOccupancySchedStrategy(const MachineSchedContext *C);
+ GCNMaxOccupancySchedStrategy(const MachineSchedContext *C,
+ bool IsLegacyScheduler = false);
};
/// The goal of this scheduling strategy is to maximize ILP for a single wave
@@ -341,6 +355,9 @@ class GCNSchedStage {
bool isRegionWithExcessRP() const {
return DAG.RegionsWithExcessRP[RegionIdx];
}
+
+ // The region number this stage is currently working on
+ unsigned getRegionIdx() { return RegionIdx; }
// Returns true if the new schedule may result in more spilling.
bool mayCauseSpilling(unsigned WavesAfter);
>From 53b4791e39de3e61f5406cc02a7554f971268aaf Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Fri, 14 Jun 2024 14:46:28 -0700
Subject: [PATCH 04/11] Formatting
Change-Id: I1cb0a88e94f4156da6118fcd3724556939351c6d
---
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 42 ++++++++++++---------
llvm/lib/Target/AMDGPU/GCNSchedStrategy.h | 3 +-
2 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index d709fe3676ddd..1172c0e43d707 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -66,9 +66,8 @@ static cl::opt<bool> GCNTrackers(
const unsigned ScheduleMetrics::ScaleFactor = 100;
GCNSchedStrategy::GCNSchedStrategy(const MachineSchedContext *C)
- : GenericScheduler(C), TargetOccupancy(0), MF(nullptr),
- TheTracker(*C->LIS), TheUpwardTracker(*C->LIS),
- HasHighPressure(false) {}
+ : GenericScheduler(C), TargetOccupancy(0), MF(nullptr), TheTracker(*C->LIS),
+ TheUpwardTracker(*C->LIS), HasHighPressure(false) {}
void GCNSchedStrategy::initialize(ScheduleDAGMI *DAG) {
GenericScheduler::initialize(DAG);
@@ -175,8 +174,10 @@ static void getRegisterPressures(bool AtTop,
auto MI = SU->getInstr();
TempTopTracker.advance(MI, true, DAG->getLIS());
- Pressure[AMDGPU::RegisterPressureSets::SReg_32] = TempTopTracker.getPressure().getSGPRNum();
- Pressure[AMDGPU::RegisterPressureSets::VGPR_32] = TempTopTracker.getPressure().getVGPRNum(false);
+ Pressure[AMDGPU::RegisterPressureSets::SReg_32] =
+ TempTopTracker.getPressure().getSGPRNum();
+ Pressure[AMDGPU::RegisterPressureSets::VGPR_32] =
+ TempTopTracker.getPressure().getVGPRNum(false);
}
else {
@@ -184,8 +185,10 @@ static void getRegisterPressures(bool AtTop,
auto MI = SU->getInstr();
TempBotTracker.recede(*MI, true);
- Pressure[AMDGPU::RegisterPressureSets::SReg_32] = TempBotTracker.getPressure().getSGPRNum();
- Pressure[AMDGPU::RegisterPressureSets::VGPR_32] = TempBotTracker.getPressure().getVGPRNum(false);
+ Pressure[AMDGPU::RegisterPressureSets::SReg_32] =
+ TempBotTracker.getPressure().getSGPRNum();
+ Pressure[AMDGPU::RegisterPressureSets::VGPR_32] =
+ TempBotTracker.getPressure().getVGPRNum(false);
}
}
}
@@ -217,7 +220,8 @@ void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
// In EXPENSIVE_CHECKS, we always query RPTracker to verify the results of
// PressureDiffs.
if (AtTop || !canUsePressureDiffs(*SU) || GCNTrackers) {
- getRegisterPressures(AtTop, RPTracker, SU, Pressure, MaxPressure, TheTracker, TheUpwardTracker, DAG);
+ getRegisterPressures(AtTop, RPTracker, SU, Pressure, MaxPressure,
+ TheTracker, TheUpwardTracker, DAG);
} else {
// Reserve 4 slots.
Pressure.resize(4, 0);
@@ -235,7 +239,8 @@ void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
#ifdef EXPENSIVE_CHECKS
std::vector<unsigned> CheckPressure, CheckMaxPressure;
- getRegisterPressures(AtTop, RPTracker, SU, CheckPressure, CheckMaxPressure,TheTracker,TheUpwardTracker, DAG);
+ getRegisterPressures(AtTop, RPTracker, SU, CheckPressure, CheckMaxPressure,
+ TheTracker, TheUpwardTracker, DAG);
if (Pressure[AMDGPU::RegisterPressureSets::SReg_32] !=
CheckPressure[AMDGPU::RegisterPressureSets::SReg_32] ||
Pressure[AMDGPU::RegisterPressureSets::VGPR_32] !=
@@ -832,15 +837,16 @@ void GCNScheduleDAGMILive::runSchedStages() {
if (GCNTrackers) {
GCNDownwardRPTracker *TheTracker = S.getTracker();
GCNUpwardRPTracker *TheUpwardTracker = S.getUpwardTracker();
- GCNRPTracker::LiveRegSet *RegionLiveIns = &LiveIns[Stage->getRegionIdx()];
-
- reinterpret_cast<GCNRPTracker *>(TheTracker)->reset(
- Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
- *RegionLiveIns);
- reinterpret_cast<GCNRPTracker *>(TheUpwardTracker)->reset(
- Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
- RegionLiveOuts.getLiveRegsForRegionIdx(Stage->getRegionIdx()));
-
+ GCNRPTracker::LiveRegSet *RegionLiveIns =
+ &LiveIns[Stage->getRegionIdx()];
+
+ reinterpret_cast<GCNRPTracker *>(TheTracker)
+ ->reset(Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
+ *RegionLiveIns);
+ reinterpret_cast<GCNRPTracker *>(TheUpwardTracker)
+ ->reset(
+ Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
+ RegionLiveOuts.getLiveRegsForRegionIdx(Stage->getRegionIdx()));
}
ScheduleDAGMILive::schedule();
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index 554df736177fa..3b12001b8cfc7 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -128,7 +128,6 @@ class GCNSchedStrategy : public GenericScheduler {
GCNDownwardRPTracker *getTracker() { return &TheTracker; }
GCNUpwardRPTracker *getUpwardTracker() { return &TheUpwardTracker; }
-
};
/// The goal of this scheduling strategy is to maximize kernel occupancy (i.e.
@@ -355,7 +354,7 @@ class GCNSchedStage {
bool isRegionWithExcessRP() const {
return DAG.RegionsWithExcessRP[RegionIdx];
}
-
+
// The region number this stage is currently working on
unsigned getRegionIdx() { return RegionIdx; }
>From 03f676db4ad9e40c0711ff9f9193eb4a5146963d Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Mon, 27 May 2024 10:43:43 -0700
Subject: [PATCH 05/11] Actually use the iterative trackers
Change-Id: I198925f5ed91b0a49ac265e19fdbe2208139f09a
---
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 1172c0e43d707..6654e9ae228f7 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -172,7 +172,7 @@ static void getRegisterPressures(bool AtTop,
if (AtTop) {
GCNDownwardRPTracker TempTopTracker(TheTracker);
auto MI = SU->getInstr();
- TempTopTracker.advance(MI, true, DAG->getLIS());
+ TempTopTracker.advance(MI, false, DAG->getLIS());
Pressure[AMDGPU::RegisterPressureSets::SReg_32] =
TempTopTracker.getPressure().getSGPRNum();
@@ -183,7 +183,7 @@ static void getRegisterPressures(bool AtTop,
else {
GCNUpwardRPTracker TempBotTracker(TheUpwardTracker);
auto MI = SU->getInstr();
- TempBotTracker.recede(*MI, true);
+ TempBotTracker.recede(*MI, false);
Pressure[AMDGPU::RegisterPressureSets::SReg_32] =
TempBotTracker.getPressure().getSGPRNum();
@@ -489,8 +489,8 @@ SUnit *GCNSchedStrategy::pickNode(bool &IsTopNode) {
void GCNSchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
if (GCNTrackers) {
MachineInstr *MI = SU->getInstr();
- IsTopNode ? (void)TheTracker.advance(MI, true, DAG->getLIS())
- : TheUpwardTracker.recede(*MI, true);
+ IsTopNode ? (void)TheTracker.advance(MI, false, DAG->getLIS())
+ : TheUpwardTracker.recede(*MI, false);
}
return GenericScheduler::schedNode(SU, IsTopNode);
>From 076b3ab23ce905bd3baa5b90502d4eba9aa49a85 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 28 May 2024 13:24:09 -0700
Subject: [PATCH 06/11] Review Comments
Change-Id: Ifa69110bf0a239ea14d25c0bad03215d1b018656
---
.../Target/AMDGPU/GCNIterativeScheduler.cpp | 2 +-
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 51 +++++++++----------
llvm/lib/Target/AMDGPU/GCNSchedStrategy.h | 8 +--
3 files changed, 30 insertions(+), 31 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index ccee5db9a3bb6..5c64fcbe96881 100644
--- a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -480,7 +480,7 @@ void GCNIterativeScheduler::scheduleLegacyMaxOccupancy(
LLVM_DEBUG(dbgs() << "Scheduling using default scheduler, "
"target occupancy = "
<< TgtOcc << '\n');
- GCNMaxOccupancySchedStrategy LStrgy(Context, /*IsLegacyScheduler*/ true);
+ GCNMaxOccupancySchedStrategy LStrgy(Context, /*IsLegacyScheduler=*/ true);
unsigned FinalOccupancy = std::min(Occ, MFI->getOccupancy());
for (int I = 0; I < NumPasses; ++I) {
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 6654e9ae228f7..aa0c914acc6a7 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -66,8 +66,8 @@ static cl::opt<bool> GCNTrackers(
const unsigned ScheduleMetrics::ScaleFactor = 100;
GCNSchedStrategy::GCNSchedStrategy(const MachineSchedContext *C)
- : GenericScheduler(C), TargetOccupancy(0), MF(nullptr), TheTracker(*C->LIS),
- TheUpwardTracker(*C->LIS), HasHighPressure(false) {}
+ : GenericScheduler(C), TargetOccupancy(0), MF(nullptr), DownwardTracker(*C->LIS),
+ UpwardTracker(*C->LIS), HasHighPressure(false) {}
void GCNSchedStrategy::initialize(ScheduleDAGMI *DAG) {
GenericScheduler::initialize(DAG);
@@ -157,8 +157,8 @@ static void getRegisterPressures(bool AtTop,
const RegPressureTracker &RPTracker, SUnit *SU,
std::vector<unsigned> &Pressure,
std::vector<unsigned> &MaxPressure,
- GCNDownwardRPTracker &TheTracker,
- GCNUpwardRPTracker &TheUpwardTracker,
+ GCNDownwardRPTracker &DownwardTracker,
+ GCNUpwardRPTracker &UpwardTracker,
ScheduleDAGMI *DAG) {
// getDownwardPressure() and getUpwardPressure() make temporary changes to
// the tracker, so we need to pass those function a non-const copy.
@@ -170,7 +170,7 @@ static void getRegisterPressures(bool AtTop,
TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
} else {
if (AtTop) {
- GCNDownwardRPTracker TempTopTracker(TheTracker);
+ GCNDownwardRPTracker TempTopTracker(DownwardTracker);
auto MI = SU->getInstr();
TempTopTracker.advance(MI, false, DAG->getLIS());
@@ -181,7 +181,7 @@ static void getRegisterPressures(bool AtTop,
}
else {
- GCNUpwardRPTracker TempBotTracker(TheUpwardTracker);
+ GCNUpwardRPTracker TempBotTracker(UpwardTracker);
auto MI = SU->getInstr();
TempBotTracker.recede(*MI, false);
@@ -221,7 +221,7 @@ void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
// PressureDiffs.
if (AtTop || !canUsePressureDiffs(*SU) || GCNTrackers) {
getRegisterPressures(AtTop, RPTracker, SU, Pressure, MaxPressure,
- TheTracker, TheUpwardTracker, DAG);
+ DownwardTracker, UpwardTracker, DAG);
} else {
// Reserve 4 slots.
Pressure.resize(4, 0);
@@ -240,7 +240,7 @@ void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
#ifdef EXPENSIVE_CHECKS
std::vector<unsigned> CheckPressure, CheckMaxPressure;
getRegisterPressures(AtTop, RPTracker, SU, CheckPressure, CheckMaxPressure,
- TheTracker, TheUpwardTracker, DAG);
+                         DownwardTracker, UpwardTracker, DAG);
if (Pressure[AMDGPU::RegisterPressureSets::SReg_32] !=
CheckPressure[AMDGPU::RegisterPressureSets::SReg_32] ||
Pressure[AMDGPU::RegisterPressureSets::VGPR_32] !=
@@ -330,13 +330,13 @@ void GCNSchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
if (DAG->isTrackingPressure()) {
SGPRPressure =
GCNTrackers
- ? (Zone.isTop() ? TheTracker.getPressure().getSGPRNum()
- : TheUpwardTracker.getPressure().getSGPRNum())
+ ? (Zone.isTop() ? DownwardTracker.getPressure().getSGPRNum()
+ : UpwardTracker.getPressure().getSGPRNum())
: Pressure[AMDGPU::RegisterPressureSets::SReg_32];
VGPRPressure =
GCNTrackers
- ? (Zone.isTop() ? TheTracker.getPressure().getVGPRNum(false)
- : TheUpwardTracker.getPressure().getVGPRNum(false))
+ ? (Zone.isTop() ? DownwardTracker.getPressure().getVGPRNum(false)
+ : UpwardTracker.getPressure().getVGPRNum(false))
: Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
}
ReadyQueue &Q = Zone.Available;
@@ -489,8 +489,8 @@ SUnit *GCNSchedStrategy::pickNode(bool &IsTopNode) {
void GCNSchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
if (GCNTrackers) {
MachineInstr *MI = SU->getInstr();
- IsTopNode ? (void)TheTracker.advance(MI, false, DAG->getLIS())
- : TheUpwardTracker.recede(*MI, false);
+ IsTopNode ? (void)DownwardTracker.advance(MI, false, DAG->getLIS())
+ : UpwardTracker.recede(*MI, false);
}
return GenericScheduler::schedNode(SU, IsTopNode);
@@ -835,18 +835,17 @@ void GCNScheduleDAGMILive::runSchedStages() {
}
if (GCNTrackers) {
- GCNDownwardRPTracker *TheTracker = S.getTracker();
- GCNUpwardRPTracker *TheUpwardTracker = S.getUpwardTracker();
- GCNRPTracker::LiveRegSet *RegionLiveIns =
- &LiveIns[Stage->getRegionIdx()];
-
- reinterpret_cast<GCNRPTracker *>(TheTracker)
- ->reset(Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
- *RegionLiveIns);
- reinterpret_cast<GCNRPTracker *>(TheUpwardTracker)
- ->reset(
- Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
- RegionLiveOuts.getLiveRegsForRegionIdx(Stage->getRegionIdx()));
+ GCNDownwardRPTracker *DownwardTracker = S.getDownwardTracker();
+ GCNUpwardRPTracker *UpwardTracker = S.getUpwardTracker();
+ GCNRPTracker::LiveRegSet *RegionLiveIns = &LiveIns[Stage->getRegionIdx()];
+
+ reinterpret_cast<GCNRPTracker *>(DownwardTracker)->reset(
+ Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
+ *RegionLiveIns);
+ reinterpret_cast<GCNRPTracker *>(UpwardTracker)->reset(
+ Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
+ RegionLiveOuts.getLiveRegsForRegionIdx(Stage->getRegionIdx()));
+
}
ScheduleDAGMILive::schedule();
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index 3b12001b8cfc7..d473442eefd88 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -71,10 +71,10 @@ class GCNSchedStrategy : public GenericScheduler {
SmallVectorImpl<GCNSchedStageID>::iterator CurrentStage = nullptr;
// GCN RP Tracker for top-down scheduling
- mutable GCNDownwardRPTracker TheTracker;
+ mutable GCNDownwardRPTracker DownwardTracker;
// GCN RP Tracker for bottom-up scheduling
- mutable GCNUpwardRPTracker TheUpwardTracker;
+ mutable GCNUpwardRPTracker UpwardTracker;
public:
// schedule() have seen register pressure over the critical limits and had to
@@ -125,9 +125,9 @@ class GCNSchedStrategy : public GenericScheduler {
GCNSchedStageID getNextStage() const;
- GCNDownwardRPTracker *getTracker() { return &TheTracker; }
+ GCNDownwardRPTracker *getDownwardTracker() { return &DownwardTracker; }
- GCNUpwardRPTracker *getUpwardTracker() { return &TheUpwardTracker; }
+ GCNUpwardRPTracker *getUpwardTracker() { return &UpwardTracker; }
};
/// The goal of this scheduling strategy is to maximize kernel occupancy (i.e.
>From 506890b83f69776adce9f591b53d3f571473629b Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 28 May 2024 13:29:41 -0700
Subject: [PATCH 07/11] Use DAG.MRI
Change-Id: I9f0275a0cede9e77dfd29262124f2a856f436c8c
---
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index aa0c914acc6a7..e7ea7dcde0606 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -839,13 +839,11 @@ void GCNScheduleDAGMILive::runSchedStages() {
GCNUpwardRPTracker *UpwardTracker = S.getUpwardTracker();
GCNRPTracker::LiveRegSet *RegionLiveIns = &LiveIns[Stage->getRegionIdx()];
- reinterpret_cast<GCNRPTracker *>(DownwardTracker)->reset(
- Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
- *RegionLiveIns);
- reinterpret_cast<GCNRPTracker *>(UpwardTracker)->reset(
- Regions[Stage->getRegionIdx()].first->getMF()->getRegInfo(),
- RegionLiveOuts.getLiveRegsForRegionIdx(Stage->getRegionIdx()));
-
+ reinterpret_cast<GCNRPTracker *>(DownwardTracker)
+ ->reset(MRI, *RegionLiveIns);
+ reinterpret_cast<GCNRPTracker *>(UpwardTracker)
+ ->reset(MRI, RegionLiveOuts.getLiveRegsForRegionIdx(
+ Stage->getRegionIdx()));
}
ScheduleDAGMILive::schedule();
>From a8f567963ac0070000b88907a233ace0ee2aeace Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 28 May 2024 13:52:29 -0700
Subject: [PATCH 08/11] Formatting
Change-Id: I74c19a2cf20d2325178933f81e0e8716d7c62f17
---
llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp | 2 +-
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 15 ++++++++-------
2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index 5c64fcbe96881..f9223913326fa 100644
--- a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -480,7 +480,7 @@ void GCNIterativeScheduler::scheduleLegacyMaxOccupancy(
LLVM_DEBUG(dbgs() << "Scheduling using default scheduler, "
"target occupancy = "
<< TgtOcc << '\n');
- GCNMaxOccupancySchedStrategy LStrgy(Context, /*IsLegacyScheduler=*/ true);
+ GCNMaxOccupancySchedStrategy LStrgy(Context, /*IsLegacyScheduler=*/true);
unsigned FinalOccupancy = std::min(Occ, MFI->getOccupancy());
for (int I = 0; I < NumPasses; ++I) {
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index e7ea7dcde0606..04c22273ae0b3 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -66,8 +66,9 @@ static cl::opt<bool> GCNTrackers(
const unsigned ScheduleMetrics::ScaleFactor = 100;
GCNSchedStrategy::GCNSchedStrategy(const MachineSchedContext *C)
- : GenericScheduler(C), TargetOccupancy(0), MF(nullptr), DownwardTracker(*C->LIS),
- UpwardTracker(*C->LIS), HasHighPressure(false) {}
+ : GenericScheduler(C), TargetOccupancy(0), MF(nullptr),
+ DownwardTracker(*C->LIS), UpwardTracker(*C->LIS), HasHighPressure(false) {
+}
void GCNSchedStrategy::initialize(ScheduleDAGMI *DAG) {
GenericScheduler::initialize(DAG);
@@ -329,10 +330,9 @@ void GCNSchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
unsigned VGPRPressure = 0;
if (DAG->isTrackingPressure()) {
SGPRPressure =
- GCNTrackers
- ? (Zone.isTop() ? DownwardTracker.getPressure().getSGPRNum()
- : UpwardTracker.getPressure().getSGPRNum())
- : Pressure[AMDGPU::RegisterPressureSets::SReg_32];
+ GCNTrackers ? (Zone.isTop() ? DownwardTracker.getPressure().getSGPRNum()
+ : UpwardTracker.getPressure().getSGPRNum())
+ : Pressure[AMDGPU::RegisterPressureSets::SReg_32];
VGPRPressure =
GCNTrackers
? (Zone.isTop() ? DownwardTracker.getPressure().getVGPRNum(false)
@@ -837,7 +837,8 @@ void GCNScheduleDAGMILive::runSchedStages() {
if (GCNTrackers) {
GCNDownwardRPTracker *DownwardTracker = S.getDownwardTracker();
GCNUpwardRPTracker *UpwardTracker = S.getUpwardTracker();
- GCNRPTracker::LiveRegSet *RegionLiveIns = &LiveIns[Stage->getRegionIdx()];
+ GCNRPTracker::LiveRegSet *RegionLiveIns =
+ &LiveIns[Stage->getRegionIdx()];
reinterpret_cast<GCNRPTracker *>(DownwardTracker)
->reset(MRI, *RegionLiveIns);
>From 1c4a37022b87dc95dc06e1bfc7a5bd74d5ef260c Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Fri, 14 Jun 2024 15:03:02 -0700
Subject: [PATCH 09/11] Review comments
Change-Id: I09f9ca74c07b516daed0e93a85937df8b9aa922b
---
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 04c22273ae0b3..786cf2894d104 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -329,15 +329,16 @@ void GCNSchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
unsigned SGPRPressure = 0;
unsigned VGPRPressure = 0;
if (DAG->isTrackingPressure()) {
- SGPRPressure =
- GCNTrackers ? (Zone.isTop() ? DownwardTracker.getPressure().getSGPRNum()
- : UpwardTracker.getPressure().getSGPRNum())
- : Pressure[AMDGPU::RegisterPressureSets::SReg_32];
- VGPRPressure =
- GCNTrackers
- ? (Zone.isTop() ? DownwardTracker.getPressure().getVGPRNum(false)
- : UpwardTracker.getPressure().getVGPRNum(false))
- : Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
+ if (!GCNTrackers) {
+ SGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
+ VGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
+ } else {
+ GCNRPTracker *T = &UpwardTracker;
+ if (Zone.isTop())
+ T = &DownwardTracker;
+ SGPRPressure = T->getPressure().getSGPRNum();
+ VGPRPressure = T->getPressure().getVGPRNum(false);
+ }
}
ReadyQueue &Q = Zone.Available;
for (SUnit *SU : Q) {
>From 40acb81b5a6d66f935da72b52a69013537635e3e Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Fri, 14 Jun 2024 16:14:57 -0700
Subject: [PATCH 10/11] Allocate Pressure vector
Change-Id: I5effce973fa2d945076e89b4453a844f0fc85fc9
---
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 786cf2894d104..92997c47677d9 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -170,6 +170,7 @@ static void getRegisterPressures(bool AtTop,
else
TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
} else {
+ Pressure.resize(4, 0);
if (AtTop) {
GCNDownwardRPTracker TempTopTracker(DownwardTracker);
auto MI = SU->getInstr();
>From 3ac3636bc2b41592555ff29046a5231abc9396b5 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 18 Jun 2024 11:39:48 -0700
Subject: [PATCH 11/11] Remove flag from upward RPTracker
Change-Id: I6217c03f56d34f584e5b23cf7c4462842bc7173b
---
llvm/lib/Target/AMDGPU/GCNRegPressure.cpp | 6 ++----
llvm/lib/Target/AMDGPU/GCNRegPressure.h | 2 +-
llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 4 ++--
3 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index a2d76807d3a71..dd2da917308d5 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -354,12 +354,10 @@ void GCNRPTracker::reset(const MachineRegisterInfo &MRI_,
////////////////////////////////////////////////////////////////////////////////
// GCNUpwardRPTracker
-void GCNUpwardRPTracker::recede(const MachineInstr &MI,
- bool UseInternalIterator) {
+void GCNUpwardRPTracker::recede(const MachineInstr &MI) {
assert(MRI && "call reset first");
- if (UseInternalIterator)
- LastTrackedMI = &MI;
+ LastTrackedMI = &MI;
if (MI.isDebugInstr())
return;
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index 671eae2e3c4ad..5b7afe32adcac 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -209,7 +209,7 @@ class GCNUpwardRPTracker : public GCNRPTracker {
/// Move to the state of RP just before \p MI. If \p UseInternalIterator
/// is set, also update the internal iterators. Setting \p UseInternalIterator
/// to false allows for an externally managed iterator / program order.
- void recede(const MachineInstr &MI, bool UseInternalIterator = true);
+ void recede(const MachineInstr &MI);
/// \returns whether the tracker's state after receding \p MI corresponds
/// to that reported by LIS.
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 92997c47677d9..7ff3ccaec8510 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -185,7 +185,7 @@ static void getRegisterPressures(bool AtTop,
else {
GCNUpwardRPTracker TempBotTracker(UpwardTracker);
auto MI = SU->getInstr();
- TempBotTracker.recede(*MI, false);
+ TempBotTracker.recede(*MI);
Pressure[AMDGPU::RegisterPressureSets::SReg_32] =
TempBotTracker.getPressure().getSGPRNum();
@@ -492,7 +492,7 @@ void GCNSchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
if (GCNTrackers) {
MachineInstr *MI = SU->getInstr();
IsTopNode ? (void)DownwardTracker.advance(MI, false, DAG->getLIS())
- : UpwardTracker.recede(*MI, false);
+ : UpwardTracker.recede(*MI);
}
return GenericScheduler::schedNode(SU, IsTopNode);
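Putting the series together, with -amdgpu-use-amdgpu-trackers enabled the flow is roughly as follows (a condensed paraphrase of the hunks above for readers who want the end state in one place, not new code):

  // Per region (runSchedStages): seed both trackers from precomputed sets.
  S.getDownwardTracker()->reset(MRI, LiveIns[Stage->getRegionIdx()]);
  S.getUpwardTracker()->reset(
      MRI, RegionLiveOuts.getLiveRegsForRegionIdx(Stage->getRegionIdx()));

  // Per candidate (getRegisterPressures): speculate on a *copy* so the real
  // top/bottom state is untouched while SUnits are compared.
  GCNDownwardRPTracker TempTopTracker(DownwardTracker);
  TempTopTracker.advance(SU->getInstr(), /*UseInternalIterator=*/false,
                         DAG->getLIS());

  // Per picked node (schedNode): commit the move on the real tracker.
  IsTopNode ? (void)DownwardTracker.advance(MI, false, DAG->getLIS())
            : UpwardTracker.recede(*MI);

Note that by the end of the series only the downward tracker keeps the UseInternalIterator flag; patch 11 drops it from GCNUpwardRPTracker::recede(), whose only iterator state is LastTrackedMI.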