[llvm] r296506 - [AMDGPU] Add second pass of the scheduler
Stanislav Mekhanoshin via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 28 11:20:34 PST 2017
Author: rampitec
Date: Tue Feb 28 13:20:33 2017
New Revision: 296506
URL: http://llvm.org/viewvc/llvm-project?rev=296506&view=rev
Log:
[AMDGPU] Add second pass of the scheduler
If during scheduling we identify that we cannot keep the optimistic
occupancy, increase the critical register pressure limit and try scheduling
the whole function again. In this case blocks with smaller pressure
will have a chance at better scheduling.
Differential Revision: https://reviews.llvm.org/D30442
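For context, the idea is: the first pass schedules every region against the most
optimistic occupancy limits; if some region forces a lower occupancy, the whole
function is rescheduled with register limits relaxed to that recorded minimum, so
low-pressure regions get more freedom. The following standalone C++ sketch models
that two-pass flow outside of the LLVM API; the register file size and the
occupancy-to-limit mapping are illustrative assumptions, not the real hardware rules.

// Standalone sketch of the two-pass occupancy idea (not the LLVM API).
#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical per-wave VGPR budget for a given occupancy (waves per SIMD).
static unsigned maxVGPRsForOccupancy(unsigned Occupancy) {
  const unsigned TotalVGPRs = 256; // assumed register file size
  return TotalVGPRs / std::max(1u, Occupancy);
}

int main() {
  // Assumed per-region VGPR demand observed after a first scheduling attempt.
  std::vector<unsigned> RegionPressure = {24, 96, 40};
  unsigned StartingOccupancy = 10; // optimistic target
  unsigned MinOccupancy = StartingOccupancy;

  // First pass: record the best occupancy each region can actually sustain.
  for (unsigned Pressure : RegionPressure) {
    unsigned Waves = StartingOccupancy;
    while (Waves > 1 && maxVGPRsForOccupancy(Waves) < Pressure)
      --Waves;
    MinOccupancy = std::min(MinOccupancy, Waves);
  }

  // Second pass: if the achievable occupancy dropped, reschedule everything
  // against the looser critical limit derived from the recorded minimum.
  if (MinOccupancy < StartingOccupancy) {
    unsigned CriticalLimit = maxVGPRsForOccupancy(MinOccupancy);
    std::printf("Retrying with occupancy %u (VGPR limit %u)\n",
                MinOccupancy, CriticalLimit);
    // ... rerun the scheduler over all recorded regions here ...
  }
  return 0;
}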
Modified:
llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp
llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.h
Modified: llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp?rev=296506&r1=296505&r2=296506&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp Tue Feb 28 13:20:33 2017
@@ -26,7 +26,7 @@ using namespace llvm;
GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
const MachineSchedContext *C) :
- GenericScheduler(C) { }
+ GenericScheduler(C), TargetOccupancy(0), MF(nullptr) { }
static unsigned getMaxWaves(unsigned SGPRs, unsigned VGPRs,
const MachineFunction &MF) {
@@ -45,6 +45,12 @@ void GCNMaxOccupancySchedStrategy::initi
const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
+ if (MF != &DAG->MF)
+ TargetOccupancy = 0;
+ MF = &DAG->MF;
+
+ const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
+
// FIXME: This is also necessary, because some passes that run after
// scheduling and before regalloc increase register pressure.
const int ErrorMargin = 3;
@@ -53,10 +59,18 @@ void GCNMaxOccupancySchedStrategy::initi
->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass) - ErrorMargin;
VGPRExcessLimit = Context->RegClassInfo
->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass) - ErrorMargin;
- SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
- SRI->getSGPRPressureSet()) - ErrorMargin;
- VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
- SRI->getVGPRPressureSet()) - ErrorMargin;
+ if (TargetOccupancy) {
+ SGPRCriticalLimit = ST.getMaxNumSGPRs(TargetOccupancy, true);
+ VGPRCriticalLimit = ST.getMaxNumVGPRs(TargetOccupancy);
+ } else {
+ SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
+ SRI->getSGPRPressureSet());
+ VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
+ SRI->getVGPRPressureSet());
+ }
+
+ SGPRCriticalLimit -= ErrorMargin;
+ VGPRCriticalLimit -= ErrorMargin;
}
void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
@@ -309,6 +323,28 @@ SUnit *GCNMaxOccupancySchedStrategy::pic
return SU;
}
+GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
+ std::unique_ptr<MachineSchedStrategy> S) :
+ ScheduleDAGMILive(C, std::move(S)),
+ ST(MF.getSubtarget<SISubtarget>()),
+ MFI(*MF.getInfo<SIMachineFunctionInfo>()),
+ StartingOccupancy(ST.getOccupancyWithLocalMemSize(MFI.getLDSSize(),
+ *MF.getFunction())),
+ MinOccupancy(StartingOccupancy), Stage(0) {
+
+ DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
+}
+
+void GCNScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs) {
+ ScheduleDAGMILive::enterRegion(bb, begin, end, regioninstrs);
+
+ if (Stage == 0)
+ Regions.push_back(std::make_pair(begin, end));
+}
+
void GCNScheduleDAGMILive::schedule() {
std::vector<MachineInstr*> Unsched;
Unsched.reserve(NumRegionInstrs);
@@ -344,6 +380,15 @@ void GCNScheduleDAGMILive::schedule() {
DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore <<
", after " << WavesAfter << ".\n");
+ // We could not keep current target occupancy because of the just scheduled
+ // region. Record new occupancy for next scheduling cycle.
+ unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
+ if (NewOccupancy < MinOccupancy) {
+ MinOccupancy = NewOccupancy;
+ DEBUG(dbgs() << "Occupancy lowered for the function to "
+ << MinOccupancy << ".\n");
+ }
+
if (WavesAfter >= WavesBefore)
return;
@@ -485,5 +530,52 @@ GCNScheduleDAGMILive::getRealRegPressure
}
void GCNScheduleDAGMILive::finalizeSchedule() {
+ // Retry function scheduling if the resulting occupancy is lower than the
+ // occupancy used for the first pass. This will give more freedom to
+ // schedule low register pressure blocks.
+ // Code is partially copied from MachineSchedulerBase::scheduleRegions().
+
+ if (!LIS || StartingOccupancy <= MinOccupancy)
+ return;
+
+ DEBUG(dbgs() << "Retrying function scheduling with lowest recorded occupancy "
+ << MinOccupancy << ".\n");
+
+ Stage++;
+ GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
+ S.TargetOccupancy = MinOccupancy;
+
+ MachineBasicBlock *MBB = nullptr;
+ for (auto Region : Regions) {
+ RegionBegin = Region.first;
+ RegionEnd = Region.second;
+
+ if (RegionBegin->getParent() != MBB) {
+ if (MBB) finishBlock();
+ MBB = RegionBegin->getParent();
+ startBlock(MBB);
+ }
+
+ unsigned NumRegionInstrs = std::distance(begin(), end());
+ enterRegion(MBB, begin(), end(), NumRegionInstrs);
+
+ // Skip empty scheduling regions (0 or 1 schedulable instructions).
+ if (begin() == end() || begin() == std::prev(end())) {
+ exitRegion();
+ continue;
+ }
+ DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ DEBUG(dbgs() << MF.getName()
+ << ":BB#" << MBB->getNumber() << " " << MBB->getName()
+ << "\n From: " << *begin() << " To: ";
+ if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
+ else dbgs() << "End";
+ dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
+
+ schedule();
+
+ exitRegion();
+ }
+ finishBlock();
LiveIns.shrink_and_clear();
}
Modified: llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.h?rev=296506&r1=296505&r2=296506&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.h Tue Feb 28 13:20:33 2017
@@ -18,7 +18,9 @@
namespace llvm {
+class SIMachineFunctionInfo;
class SIRegisterInfo;
+class SISubtarget;
/// This is a minimal scheduler strategy. The main difference between this
/// and the GenericScheduler is that GCNSchedStrategy uses different
@@ -43,6 +45,10 @@ class GCNMaxOccupancySchedStrategy : pub
unsigned SGPRCriticalLimit;
unsigned VGPRCriticalLimit;
+ unsigned TargetOccupancy;
+
+ MachineFunction *MF;
+
public:
GCNMaxOccupancySchedStrategy(const MachineSchedContext *C);
@@ -53,6 +59,23 @@ public:
class GCNScheduleDAGMILive : public ScheduleDAGMILive {
+ const SISubtarget &ST;
+
+ const SIMachineFunctionInfo &MFI;
+
+ // Occupancy target at the beginning of the function scheduling cycle.
+ unsigned StartingOccupancy;
+
+ // Minimal real occupancy recorded for the function.
+ unsigned MinOccupancy;
+
+ // Scheduling stage number.
+ unsigned Stage;
+
+ // Vector of regions recorded for later rescheduling.
+ SmallVector<std::pair<const MachineBasicBlock::iterator,
+ const MachineBasicBlock::iterator>, 32> Regions;
+
// Region live-ins.
DenseMap<unsigned, LaneBitmask> LiveIns;
@@ -67,8 +90,12 @@ class GCNScheduleDAGMILive : public Sche
public:
GCNScheduleDAGMILive(MachineSchedContext *C,
- std::unique_ptr<MachineSchedStrategy> S) :
- ScheduleDAGMILive(C, std::move(S)) {}
+ std::unique_ptr<MachineSchedStrategy> S);
+
+ void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs) override;
void schedule() override;