[llvm] [AMDGPU][MISCHED] GCNBalancedSchedStrategy. (PR #66634)

Jeffrey Byrnes via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 18 10:01:41 PDT 2023


================
@@ -981,193 +978,52 @@ void GCNSchedStage::checkScheduling() {
   }
 }
 
-unsigned
-GCNSchedStage::computeSUnitReadyCycle(const SUnit &SU, unsigned CurrCycle,
-                                      DenseMap<unsigned, unsigned> &ReadyCycles,
-                                      const TargetSchedModel &SM) {
-  unsigned ReadyCycle = CurrCycle;
-  for (auto &D : SU.Preds) {
-    if (D.isAssignedRegDep()) {
-      MachineInstr *DefMI = D.getSUnit()->getInstr();
-      unsigned Latency = SM.computeInstrLatency(DefMI);
-      unsigned DefReady = ReadyCycles[DAG.getSUnit(DefMI)->NodeNum];
-      ReadyCycle = std::max(ReadyCycle, DefReady + Latency);
-    }
-  }
-  ReadyCycles[SU.NodeNum] = ReadyCycle;
-  return ReadyCycle;
-}
-
-#ifndef NDEBUG
-struct EarlierIssuingCycle {
-  bool operator()(std::pair<MachineInstr *, unsigned> A,
-                  std::pair<MachineInstr *, unsigned> B) const {
-    return A.second < B.second;
-  }
-};
-
-static void printScheduleModel(std::set<std::pair<MachineInstr *, unsigned>,
-                                        EarlierIssuingCycle> &ReadyCycles) {
-  if (ReadyCycles.empty())
-    return;
-  unsigned BBNum = ReadyCycles.begin()->first->getParent()->getNumber();
-  dbgs() << "\n################## Schedule time ReadyCycles for MBB : " << BBNum
-         << " ##################\n# Cycle #\t\t\tInstruction          "
-            "             "
-            "                            \n";
-  unsigned IPrev = 1;
-  for (auto &I : ReadyCycles) {
-    if (I.second > IPrev + 1)
-      dbgs() << "****************************** BUBBLE OF " << I.second - IPrev
-             << " CYCLES DETECTED ******************************\n\n";
-    dbgs() << "[ " << I.second << " ]  :  " << *I.first << "\n";
-    IPrev = I.second;
-  }
-}
-#endif
-
-ScheduleMetrics
-GCNSchedStage::getScheduleMetrics(const std::vector<SUnit> &InputSchedule) {
-#ifndef NDEBUG
-  std::set<std::pair<MachineInstr *, unsigned>, EarlierIssuingCycle>
-      ReadyCyclesSorted;
-#endif
-  const TargetSchedModel &SM = ST.getInstrInfo()->getSchedModel();
-  unsigned SumBubbles = 0;
-  DenseMap<unsigned, unsigned> ReadyCycles;
-  unsigned CurrCycle = 0;
-  for (auto &SU : InputSchedule) {
-    unsigned ReadyCycle =
-        computeSUnitReadyCycle(SU, CurrCycle, ReadyCycles, SM);
-    SumBubbles += ReadyCycle - CurrCycle;
-#ifndef NDEBUG
-    ReadyCyclesSorted.insert(std::make_pair(SU.getInstr(), ReadyCycle));
-#endif
-    CurrCycle = ++ReadyCycle;
-  }
-#ifndef NDEBUG
-  LLVM_DEBUG(
-      printScheduleModel(ReadyCyclesSorted);
-      dbgs() << "\n\t"
-             << "Metric: "
-             << (SumBubbles
-                     ? (SumBubbles * ScheduleMetrics::ScaleFactor) / CurrCycle
-                     : 1)
-             << "\n\n");
-#endif
-
-  return ScheduleMetrics(CurrCycle, SumBubbles);
-}
-
-ScheduleMetrics
-GCNSchedStage::getScheduleMetrics(const GCNScheduleDAGMILive &DAG) {
-#ifndef NDEBUG
-  std::set<std::pair<MachineInstr *, unsigned>, EarlierIssuingCycle>
-      ReadyCyclesSorted;
-#endif
-  const TargetSchedModel &SM = ST.getInstrInfo()->getSchedModel();
-  unsigned SumBubbles = 0;
-  DenseMap<unsigned, unsigned> ReadyCycles;
-  unsigned CurrCycle = 0;
-  for (auto &MI : DAG) {
-    SUnit *SU = DAG.getSUnit(&MI);
-    if (!SU)
-      continue;
-    unsigned ReadyCycle =
-        computeSUnitReadyCycle(*SU, CurrCycle, ReadyCycles, SM);
-    SumBubbles += ReadyCycle - CurrCycle;
-#ifndef NDEBUG
-    ReadyCyclesSorted.insert(std::make_pair(SU->getInstr(), ReadyCycle));
-#endif
-    CurrCycle = ++ReadyCycle;
-  }
-#ifndef NDEBUG
-  LLVM_DEBUG(
-      printScheduleModel(ReadyCyclesSorted);
-      dbgs() << "\n\t"
-             << "Metric: "
-             << (SumBubbles
-                     ? (SumBubbles * ScheduleMetrics::ScaleFactor) / CurrCycle
-                     : 1)
-             << "\n\n");
-#endif
-
-  return ScheduleMetrics(CurrCycle, SumBubbles);
-}
-
-bool GCNSchedStage::shouldRevertScheduling(unsigned WavesAfter) {
+bool GCNSchedStage::shouldRevertScheduling(unsigned WavesAfter,
+                                           unsigned WavesBefore) {
   if (WavesAfter < DAG.MinOccupancy)
     return true;
 
   return false;
 }
 
-bool OccInitialScheduleStage::shouldRevertScheduling(unsigned WavesAfter) {
+bool OccInitialScheduleStage::shouldRevertScheduling(unsigned WavesAfter,
+                                                     unsigned WavesBefore) {
   if (PressureAfter == PressureBefore)
     return false;
 
-  if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
-    return true;
-
   if (mayCauseSpilling(WavesAfter))
     return true;
 
-  return false;
+  return S.computeScheduleMetric(RegionIdx, WavesAfter, WavesBefore);
 }
 
-bool UnclusteredHighRPStage::shouldRevertScheduling(unsigned WavesAfter) {
-  // If RP is not reduced in the unclustred reschedule stage, revert to the
-  // old schedule.
-  if ((WavesAfter <= PressureBefore.getOccupancy(ST) &&
-       mayCauseSpilling(WavesAfter)) ||
-      GCNSchedStage::shouldRevertScheduling(WavesAfter)) {
-    LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n");
-    return true;
-  }
-
-  // Do not attempt to relax schedule even more if we are already spilling.
-  if (isRegionWithExcessRP())
-    return false;
-
-  LLVM_DEBUG(
-      dbgs()
-      << "\n\t      *** In shouldRevertScheduling ***\n"
-      << "      *********** BEFORE UnclusteredHighRPStage ***********\n");
-  ScheduleMetrics MBefore =
-      getScheduleMetrics(DAG.SUnits);
-  LLVM_DEBUG(
-      dbgs()
-      << "\n      *********** AFTER UnclusteredHighRPStage ***********\n");
-  ScheduleMetrics MAfter = getScheduleMetrics(DAG);
-  unsigned OldMetric = MBefore.getMetric();
-  unsigned NewMetric = MAfter.getMetric();
-  unsigned WavesBefore =
-      std::min(S.getTargetOccupancy(), PressureBefore.getOccupancy(ST));
-  unsigned Profit =
-      ((WavesAfter * ScheduleMetrics::ScaleFactor) / WavesBefore *
-       ((OldMetric + ScheduleMetricBias) * ScheduleMetrics::ScaleFactor) /
-       NewMetric) /
-      ScheduleMetrics::ScaleFactor;
-  LLVM_DEBUG(dbgs() << "\tMetric before " << MBefore << "\tMetric after "
-                    << MAfter << "Profit: " << Profit << "\n");
-  return Profit < ScheduleMetrics::ScaleFactor;
+bool UnclusteredHighRPStage::shouldRevertScheduling(unsigned WavesAfter,
+                                                    unsigned WavesBefore) {
+  // Revert if may cause spilling. Otherwise rely on the metric computed by the
+  // strategy class. Exception: does not make sense to revert the unclustered
+  // schedule if we are still in excess RP state as it will not become better.
+  return GCNSchedStage::shouldRevertScheduling(WavesAfter, WavesBefore) ||
----------------
jrbyrnes wrote:

Does this preserve the behavior of the MaxOccupancy scheduler?

https://github.com/llvm/llvm-project/pull/66634
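
For readers skimming the removed code above: the old UnclusteredHighRPStage logic keeps a rescheduled region only when the occupancy it gives up is paid for by fewer latency bubbles. Below is a minimal standalone sketch of that bubble metric and profit test, assuming ScaleFactor = 100 and ScheduleMetricBias = 10 purely for illustration (the real values come from ScheduleMetrics and the corresponding cl::opt, not from this sketch):

// Standalone sketch of the metric/profit check in the removed code.
// ScaleFactor and ScheduleMetricBias values here are assumptions.
#include <cstdio>

constexpr unsigned ScaleFactor = 100;        // assumed stand-in for ScheduleMetrics::ScaleFactor
constexpr unsigned ScheduleMetricBias = 10;  // assumed stand-in for the bias option

// Metric: scaled fraction of issue cycles lost to latency bubbles.
unsigned metric(unsigned SumBubbles, unsigned CurrCycle) {
  return SumBubbles ? (SumBubbles * ScaleFactor) / CurrCycle : 1;
}

// Profit >= ScaleFactor means the bubble reduction outweighs the
// occupancy drop; otherwise the stage reverts to the old schedule.
bool keepNewSchedule(unsigned WavesBefore, unsigned WavesAfter,
                     unsigned OldMetric, unsigned NewMetric) {
  unsigned Profit =
      ((WavesAfter * ScaleFactor) / WavesBefore *
       ((OldMetric + ScheduleMetricBias) * ScaleFactor) / NewMetric) /
      ScaleFactor;
  return Profit >= ScaleFactor;
}

int main() {
  // Example: occupancy drops from 8 to 7 waves, but bubbles shrink from
  // 30% of cycles to 12%, so the new schedule is kept (prints keep = 1).
  unsigned Old = metric(30, 100), New = metric(12, 100);
  std::printf("keep = %d\n", keepNewSchedule(8, 7, Old, New));
}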

