[llvm-commits] [llvm] r151228 - in /llvm/trunk: include/llvm/ADT/SparseSet.h lib/CodeGen/ScheduleDAGInstrs.cpp lib/CodeGen/ScheduleDAGInstrs.h
Andrew Trick
atrick at apple.com
Wed Feb 22 17:52:38 PST 2012
Author: atrick
Date: Wed Feb 22 19:52:38 2012
New Revision: 151228
URL: http://llvm.org/viewvc/llvm-project?rev=151228&view=rev
Log:
PostRASched: Convert physreg def/use tracking to Jakob's SparseSet.
Added array subscript to SparseSet for convenience.
Slight reorg to make it easier to manage the def/use sets.
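As a rough, self-contained sketch of the def/use bookkeeping being reorganized here (not the LLVM code itself): SUnits are plain ints, a std::unordered_map stands in for the SparseSet-based Reg2SUnitsMap, and register aliases, anti/output dependencies and latency handling are all omitted. The names addPhysRegDepsSketch, RegMap, Operand and SUnitId are invented for the example.

#include <cstdio>
#include <unordered_map>
#include <vector>

typedef int SUnitId;                                   // stand-in for SUnit*
typedef std::unordered_map<unsigned, std::vector<SUnitId>> RegMap;

struct Operand { unsigned Reg; bool IsDef; };

// Instructions are visited bottom-up, so the uses recorded in 'Uses' appear
// later in program order than the def currently being processed.
static void addPhysRegDepsSketch(SUnitId SU, const Operand &MO,
                                 RegMap &Defs, RegMap &Uses) {
  if (!MO.IsDef) {
    // A use only records itself; the matching def draws the edge later.
    Uses[MO.Reg].push_back(SU);
    return;
  }
  // A def reports a data edge to every recorded use of this register ...
  RegMap::iterator UsesI = Uses.find(MO.Reg);
  if (UsesI != Uses.end()) {
    for (std::size_t i = 0; i != UsesI->second.size(); ++i)
      std::printf("data dep: SU%d -> SU%d on reg %u\n",
                  SU, (int)UsesI->second[i], MO.Reg);
    UsesI->second.clear();                             // ... retires those uses ...
  }
  Defs[MO.Reg].push_back(SU);                          // ... and records itself.
}

int main() {
  RegMap Defs, Uses;
  // Program order: SU0 defines r1, then SU1 and SU2 use r1.
  // The scheduler walks bottom-up: SU2, SU1, SU0.
  Operand UseR1 = {1, false};
  Operand DefR1 = {1, true};
  addPhysRegDepsSketch(2, UseR1, Defs, Uses);
  addPhysRegDepsSketch(1, UseR1, Defs, Uses);
  addPhysRegDepsSketch(0, DefR1, Defs, Uses);
  return 0;
}

Running it prints a data edge from SU0 to each of the two recorded uses, which is the pattern the new addPhysRegDataDeps routine below handles on its own.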
Modified:
llvm/trunk/include/llvm/ADT/SparseSet.h
llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp
llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.h
Modified: llvm/trunk/include/llvm/ADT/SparseSet.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/ADT/SparseSet.h?rev=151228&r1=151227&r2=151228&view=diff
==============================================================================
--- llvm/trunk/include/llvm/ADT/SparseSet.h (original)
+++ llvm/trunk/include/llvm/ADT/SparseSet.h Wed Feb 22 19:52:38 2012
@@ -213,6 +213,13 @@
return std::make_pair(end() - 1, true);
}
+ /// array subscript - If an element already exists with this key, return it.
+ /// Otherwise, automatically construct a new value from Key, insert it,
+ /// and return the newly inserted element.
+ ValueT &operator[](unsigned Key) {
+ return *insert(ValueT(Key)).first;
+ }
+
/// erase - Erases an existing element identified by a valid iterator.
///
/// This invalidates all iterators, but erase() returns an iterator pointing
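For readers who have not met SparseSet before, here is a stand-alone, simplified sketch of the sparse-index-plus-dense-vector idea and of how an operator[] like the one added above can be built purely on insert(): construct a value from the key and let insert() either return the existing element or append a new one. This is an illustration, not LLVM's ADT (which, among other differences, does not use std::vector for its sparse array); MiniSparseMap and Entry are invented names.

#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

// The element carries its own key, mirroring the Reg2SUnits pattern in this
// patch: the set never compares whole values, only keys.
struct Entry {
  unsigned Key;
  std::vector<int> Payload;                 // stands in for the SUnit* list
  explicit Entry(unsigned K) : Key(K) {}
  unsigned getKey() const { return Key; }
};

class MiniSparseMap {
  std::vector<unsigned> Sparse;             // indexed by key, holds a dense index
  std::vector<Entry> Dense;                 // the elements, packed contiguously
public:
  typedef std::vector<Entry>::iterator iterator;

  // Declare the range of valid keys, e.g. the number of physical registers.
  void setUniverse(unsigned U) { Sparse.resize(U); }

  iterator begin() { return Dense.begin(); }
  iterator end() { return Dense.end(); }
  bool empty() const { return Dense.empty(); }

  iterator find(unsigned Key) {
    assert(Key < Sparse.size() && "key outside universe");
    unsigned Idx = Sparse[Key];
    // Sparse may hold stale values; it is only trusted if Dense agrees.
    if (Idx < Dense.size() && Dense[Idx].getKey() == Key)
      return Dense.begin() + Idx;
    return Dense.end();
  }

  std::pair<iterator, bool> insert(const Entry &Val) {
    iterator I = find(Val.getKey());
    if (I != end())
      return std::make_pair(I, false);
    Sparse[Val.getKey()] = (unsigned)Dense.size();
    Dense.push_back(Val);
    return std::make_pair(end() - 1, true);
  }

  // The convenience subscript, expressed exactly like the hunk above:
  // build a value from the key and fall back on insert().
  Entry &operator[](unsigned Key) { return *insert(Entry(Key)).first; }

  // Clearing touches only the dense vector: cost is proportional to the
  // number of keys actually inserted, not to the size of the universe.
  void clear() { Dense.clear(); }
};

int main() {
  MiniSparseMap Uses;
  Uses.setUniverse(256);
  Uses[42].Payload.push_back(7);            // creates the entry for key 42
  Uses[42].Payload.push_back(8);            // finds and reuses the same entry
  assert(Uses.find(42) != Uses.end());
  assert(Uses.find(5) == Uses.end());
  std::printf("key 42 holds %u items\n", (unsigned)Uses[42].Payload.size());
  Uses.clear();                             // ready for the next region
  return 0;
}

The payoff used later in this patch is the clear() at the end: it only walks the dense vector, so resetting Defs/Uses between scheduling regions no longer depends on the total number of physical registers.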
Modified: llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp?rev=151228&r1=151227&r2=151228&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp (original)
+++ llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp Wed Feb 22 19:52:38 2012
@@ -39,9 +39,7 @@
LiveIntervals *lis)
: ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
InstrItins(mf.getTarget().getInstrItineraryData()), IsPostRA(IsPostRAFlag),
- LIS(lis), UnitLatencies(false),
- Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()),
- LoopRegs(MLI, MDT), FirstDbgValue(0) {
+ LIS(lis), UnitLatencies(false), LoopRegs(MLI, MDT), FirstDbgValue(0) {
assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
DbgValues.clear();
assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
@@ -173,7 +171,7 @@
if (Reg == 0) continue;
if (TRI->isPhysicalRegister(Reg))
- Uses[Reg].push_back(&ExitSU);
+ Uses[Reg].SUnits.push_back(&ExitSU);
else
assert(!IsPostRA && "Virtual register encountered after regalloc.");
}
@@ -187,59 +185,27 @@
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
if (Seen.insert(Reg))
- Uses[Reg].push_back(&ExitSU);
+ Uses[Reg].SUnits.push_back(&ExitSU);
}
}
}
-/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
-/// this SUnit to following instructions in the same scheduling region that
- /// depend on the physical register referenced at OperIdx.
-void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
- const MachineInstr *MI = SU->getInstr();
- const MachineOperand &MO = MI->getOperand(OperIdx);
- unsigned Reg = MO.getReg();
+/// MO is an operand of SU's instruction that defines a physical register. Add
+/// data dependencies from SU to any uses of the physical register.
+void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
+ const MachineOperand &MO) {
+ assert(MO.isDef() && "expect physreg def");
// Ask the target if address-backscheduling is desirable, and if so how much.
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
+ unsigned DataLatency = SU->Latency;
- // Optionally add output and anti dependencies. For anti
- // dependencies we use a latency of 0 because for a multi-issue
- // target we want to allow the defining instruction to issue
- // in the same cycle as the using instruction.
- // TODO: Using a latency of 1 here for output dependencies assumes
- // there's no cost for reusing registers.
- SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
- for (const unsigned *Alias = TRI->getOverlaps(Reg); *Alias; ++Alias) {
- std::vector<SUnit *> &DefList = Defs[*Alias];
- for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
- SUnit *DefSU = DefList[i];
- if (DefSU == &ExitSU)
- continue;
- if (DefSU != SU &&
- (Kind != SDep::Output || !MO.isDead() ||
- !DefSU->getInstr()->registerDefIsDead(*Alias))) {
- if (Kind == SDep::Anti)
- DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
- else {
- unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
- DefSU->getInstr());
- DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
- }
- }
- }
- }
-
- // Retrieve the UseList to add data dependencies and update uses.
- std::vector<SUnit *> &UseList = Uses[Reg];
- if (MO.isDef()) {
- // Update DefList. Defs are pushed in the order they are visited and
- // never reordered.
- std::vector<SUnit *> &DefList = Defs[Reg];
-
- // Add any data dependencies.
- unsigned DataLatency = SU->Latency;
+ for (const unsigned *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+ Reg2SUnitsMap::iterator UsesI = Uses.find(*Alias);
+ if (UsesI == Uses.end())
+ continue;
+ std::vector<SUnit*> &UseList = UsesI->SUnits;
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i];
if (UseSU == SU)
@@ -247,7 +213,6 @@
unsigned LDataLatency = DataLatency;
// Optionally add in a special extra latency for nodes that
// feed addresses.
- // TODO: Do this for register aliases too.
// TODO: Perhaps we should get rid of
// SpecialAddressLatency and just move this into
// adjustSchedDependency for the targets that care about it.
@@ -255,8 +220,8 @@
UseSU != &ExitSU) {
MachineInstr *UseMI = UseSU->getInstr();
const MCInstrDesc &UseMCID = UseMI->getDesc();
- int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
- assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
+ int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
+ assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
if (RegUseIndex >= 0 &&
(UseMI->mayLoad() || UseMI->mayStore()) &&
(unsigned)RegUseIndex < UseMCID.getNumOperands() &&
@@ -266,38 +231,79 @@
// Adjust the dependence latency using operand def/use
// information (if any), and then allow the target to
// perform its own adjustments.
- const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
+ const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
if (!UnitLatencies) {
ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
}
UseSU->addPred(dep);
}
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- std::vector<SUnit *> &UseList = Uses[*Alias];
- for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
- SUnit *UseSU = UseList[i];
- if (UseSU == SU)
- continue;
- const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
- if (!UnitLatencies) {
- ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
- ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
+ }
+}
+
+/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
+/// this SUnit to following instructions in the same scheduling region that
+/// depend on the physical register referenced at OperIdx.
+void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
+ const MachineInstr *MI = SU->getInstr();
+ const MachineOperand &MO = MI->getOperand(OperIdx);
+
+ // Optionally add output and anti dependencies. For anti
+ // dependencies we use a latency of 0 because for a multi-issue
+ // target we want to allow the defining instruction to issue
+ // in the same cycle as the using instruction.
+ // TODO: Using a latency of 1 here for output dependencies assumes
+ // there's no cost for reusing registers.
+ SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
+ for (const unsigned *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+ Reg2SUnitsMap::iterator DefI = Defs.find(*Alias);
+ if (DefI == Defs.end())
+ continue;
+ std::vector<SUnit *> &DefList = DefI->SUnits;
+ for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
+ SUnit *DefSU = DefList[i];
+ if (DefSU == &ExitSU)
+ continue;
+ if (DefSU != SU &&
+ (Kind != SDep::Output || !MO.isDead() ||
+ !DefSU->getInstr()->registerDefIsDead(*Alias))) {
+ if (Kind == SDep::Anti)
+ DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
+ else {
+ unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
+ DefSU->getInstr());
+ DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
}
- UseSU->addPred(dep);
}
}
+ }
+
+ if (!MO.isDef()) {
+ // Either insert a new Reg2SUnits entry with an empty SUnits list, or
+ // retrieve the existing SUnits list for this register's uses.
+ // Push this SUnit on the use list.
+ Uses[MO.getReg()].SUnits.push_back(SU);
+ }
+ else {
+ addPhysRegDataDeps(SU, MO);
+
+ // Either insert a new Reg2SUnits entry with an empty SUnits list, or
+ // retrieve the existing SUnits list for this register's defs.
+ std::vector<SUnit *> &DefList = Defs[MO.getReg()].SUnits;
// If a def is going to wrap back around to the top of the loop,
// backschedule it.
if (!UnitLatencies && DefList.empty()) {
- LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
+ LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(MO.getReg());
if (I != LoopRegs.Deps.end()) {
const MachineOperand *UseMO = I->second.first;
unsigned Count = I->second.second;
const MachineInstr *UseMI = UseMO->getParent();
unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
const MCInstrDesc &UseMCID = UseMI->getDesc();
+ const TargetSubtargetInfo &ST =
+ TM.getSubtarget<TargetSubtargetInfo>();
+ unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
// TODO: If we knew the total depth of the region here, we could
// handle the case where the whole loop is inside the region but
// is large enough that the isScheduleHigh trick isn't needed.
@@ -332,7 +338,11 @@
}
}
- UseList.clear();
+ // clear this register's use list
+ Reg2SUnitsMap::iterator UsesI = Uses.find(MO.getReg());
+ if (UsesI != Uses.end())
+ UsesI->SUnits.clear();
+
if (!MO.isDead())
DefList.clear();
@@ -345,9 +355,8 @@
while (!DefList.empty() && DefList.back()->isCall)
DefList.pop_back();
}
+ // Defs are pushed in the order they are visited and never reordered.
DefList.push_back(SU);
- } else {
- UseList.push_back(SU);
}
}
@@ -482,13 +491,10 @@
DbgValues.clear();
FirstDbgValue = NULL;
- // Model data dependencies between instructions being scheduled and the
- // ExitSU.
- AddSchedBarrierDeps();
-
- for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
- assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
- }
+ assert(Defs.empty() && Uses.empty() &&
+ "Only BuildGraph should update Defs/Uses");
+ Defs.setUniverse(TRI->getNumRegs());
+ Uses.setUniverse(TRI->getNumRegs());
assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
// FIXME: Allow SparseSet to reserve space for the creation of virtual
@@ -496,6 +502,10 @@
// because we want to assert that vregs are not created during DAG building.
VRegDefs.setUniverse(MRI.getNumVirtRegs());
+ // Model data dependencies between instructions being scheduled and the
+ // ExitSU.
+ AddSchedBarrierDeps();
+
// Walk the list of instructions, from bottom moving up.
MachineInstr *PrevMI = NULL;
for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
@@ -685,10 +695,8 @@
if (PrevMI)
FirstDbgValue = PrevMI;
- for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
- Defs[i].clear();
- Uses[i].clear();
- }
+ Defs.clear();
+ Uses.clear();
VRegDefs.clear();
PendingLoads.clear();
MISUnitMap.clear();
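A consequence visible at the end of this file's diff: BuildSchedGraph no longer loops over TRI->getNumRegs() buckets to reset Defs and Uses. It asserts they are empty, calls setUniverse(), and clears them once at the end, and AddSchedBarrierDeps is now invoked after setUniverse(), presumably because it pushes ExitSU entries into Uses, which requires the universe to be in place first. The toy comparison below, with made-up register counts, illustrates why the reset cost now tracks the registers a region actually touched rather than the size of the register file.

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  const unsigned NumRegs = 300;             // stand-in for TRI->getNumRegs()
  const unsigned Touched[] = {3, 17, 92};   // registers this region referenced

  // Old scheme: one bucket per physical register, each cleared individually.
  std::vector<std::vector<int>> OldDefs(NumRegs);
  for (unsigned R : Touched)
    OldDefs[R].push_back(0);
  unsigned OldWork = 0;
  for (unsigned i = 0; i != NumRegs; ++i) {
    OldDefs[i].clear();
    ++OldWork;
  }

  // New scheme: like SparseSet's dense vector, only touched registers have an
  // entry, so resetting between regions visits just those entries.
  std::vector<std::pair<unsigned, std::vector<int>>> NewDefs;
  for (unsigned R : Touched)
    NewDefs.push_back(std::make_pair(R, std::vector<int>(1, 0)));
  unsigned NewWork = (unsigned)NewDefs.size();
  NewDefs.clear();

  std::printf("old reset visits %u buckets; new reset visits %u entries\n",
              OldWork, NewWork);
  return 0;
}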
Modified: llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.h?rev=151228&r1=151227&r2=151228&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.h (original)
+++ llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.h Wed Feb 22 19:52:38 2012
@@ -118,12 +118,15 @@
/// the def-side latency only.
bool UnitLatencies;
- /// Defs, Uses - Remember where defs and uses of each register are as we
- /// iterate upward through the instructions. This is allocated here instead
- /// of inside BuildSchedGraph to avoid the need for it to be initialized and
- /// destructed for each block.
- std::vector<std::vector<SUnit *> > Defs;
- std::vector<std::vector<SUnit *> > Uses;
+ /// An individual mapping from physical register number to an SUnit vector.
+ struct Reg2SUnits {
+ unsigned PhysReg;
+ std::vector<SUnit*> SUnits;
+
+ explicit Reg2SUnits(unsigned reg): PhysReg(reg) {}
+
+ unsigned getSparseSetKey() const { return PhysReg; }
+ };
/// An individual mapping from virtual register number to SUnit.
struct VReg2SUnit {
@@ -139,8 +142,16 @@
// Use SparseSet as a SparseMap by relying on the fact that it never
// compares ValueT's, only unsigned keys. This allows the set to be cleared
// between scheduling regions in constant time.
+ typedef SparseSet<Reg2SUnits> Reg2SUnitsMap;
typedef SparseSet<VReg2SUnit> VReg2SUnitMap;
+ /// Defs, Uses - Remember where defs and uses of each register are as we
+ /// iterate upward through the instructions. This is allocated here instead
+ /// of inside BuildSchedGraph to avoid the need for it to be initialized and
+ /// destructed for each block.
+ Reg2SUnitsMap Defs;
+ Reg2SUnitsMap Uses;
+
// Track the last instruction in this region defining each virtual register.
VReg2SUnitMap VRegDefs;
@@ -247,6 +258,7 @@
}
void initSUnits();
+ void addPhysRegDataDeps(SUnit *SU, const MachineOperand &MO);
void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
void addVRegUseDeps(SUnit *SU, unsigned OperIdx);