<div dir="ltr">Hi Wei,<div><br></div><div>I'm getting an error when building with clang:</div><div><br></div><div><div>/usr/local/google/home/tejohnson/llvm/llvm_15/lib/CodeGen/InlineSpiller.cpp:174:8: error: 'postOptimization' overrides a member function but is not marked 'override' [-Werror,-Winconsistent-missing-override]</div><div> void postOptimization();</div><div> ^</div><div>/usr/local/google/home/tejohnson/llvm/llvm_15/lib/CodeGen/Spiller.h:32:18: note: overridden virtual function is here</div><div> virtual void postOptimization() {};</div><div> ^</div><div>1 error generated.</div></div><div><br></div><div>Teresa</div><div><br></div></div><div class="gmail_extra"><br><div class="gmail_quote">On Mon, Apr 4, 2016 at 9:42 AM, Wei Mi via llvm-commits <span dir="ltr"><<a href="mailto:llvm-commits@lists.llvm.org" target="_blank">llvm-commits@lists.llvm.org</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Author: wmi<br>
Date: Mon Apr 4 11:42:40 2016<br>
New Revision: 265309<br>
<br>
URL: http://llvm.org/viewvc/llvm-project?rev=265309&view=rev
Log:<br>
Replace analyzeSiblingValues with a new algorithm to fix its compile
time issue. The patch solves PR17409 and its duplicates.

analyzeSiblingValues is an N x N complexity algorithm, where N is
the number of siblings generated by register splitting. Although it
causes a significant compile time issue when N is large, it is also
important for performance since it removes redundant spills and
enables rematerialization.

To solve the compile time issue, the patch removes analyzeSiblingValues
and replaces it with lower cost alternatives consisting of two parts. The
first part adds a new spill hoisting method in postOptimization of
register allocation. It does spill hoisting once, after all the spills
are generated, instead of inside every instance of selectOrSplit. The
second part queries the defining expression of the original register for
rematerialization and keeps it available during register allocation
even if it is already dead. Those dead instructions are deleted only in
postOptimization. With these two parts, the patch can remove
analyzeSiblingValues without sacrificing performance.
<br>
Differential Revision: http://reviews.llvm.org/D15302
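
(Editorial sketch, not part of the commit message: the driver side of this change lives in RegAllocBase/RegAllocGreedy, listed under Modified below. With names such as SpillerInstance, DeadRemats, and LIS assumed from context rather than copied from the diff, the intended flow after the selectOrSplit loop is roughly:)

  // Owned by the allocator; every LiveRangeEdit created during allocation
  // gets a pointer to it, so dead original defs are parked here instead of
  // being erased immediately.
  SmallPtrSet<MachineInstr *, 32> DeadRemats;

  // Once all virtual registers have been assigned:
  SpillerInstance->postOptimization();       // hoist/merge the recorded spills
  for (MachineInstr *DeadInst : DeadRemats) {
    LIS->RemoveMachineInstrFromMaps(*DeadInst);
    DeadInst->eraseFromParent();             // now safe to drop the kept defs
  }
  DeadRemats.clear();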
<br>
<br>
Added:<br>
llvm/trunk/test/CodeGen/X86/hoist-spill.ll<br>
llvm/trunk/test/CodeGen/X86/new-remat.ll<br>
Removed:<br>
llvm/trunk/test/CodeGen/AArch64/aarch64-deferred-spilling.ll<br>
Modified:<br>
llvm/trunk/include/llvm/CodeGen/LiveRangeEdit.h<br>
llvm/trunk/lib/CodeGen/InlineSpiller.cpp<br>
llvm/trunk/lib/CodeGen/LiveRangeEdit.cpp<br>
llvm/trunk/lib/CodeGen/RegAllocBase.cpp<br>
llvm/trunk/lib/CodeGen/RegAllocBase.h<br>
llvm/trunk/lib/CodeGen/RegAllocBasic.cpp<br>
llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp<br>
llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp<br>
llvm/trunk/lib/CodeGen/Spiller.h<br>
llvm/trunk/lib/CodeGen/SplitKit.cpp<br>
llvm/trunk/lib/CodeGen/SplitKit.h<br>
llvm/trunk/test/CodeGen/X86/fp128-compare.ll<br>
llvm/trunk/test/CodeGen/X86/ragreedy-hoist-spill.ll<br>
<br>
Modified: llvm/trunk/include/llvm/CodeGen/LiveRangeEdit.h<br>
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/LiveRangeEdit.h?rev=265309&r1=265308&r2=265309&view=diff
==============================================================================<br>
--- llvm/trunk/include/llvm/CodeGen/LiveRangeEdit.h (original)<br>
+++ llvm/trunk/include/llvm/CodeGen/LiveRangeEdit.h Mon Apr 4 11:42:40 2016<br>
@@ -72,6 +72,10 @@ private:<br>
/// ScannedRemattable - true when remattable values have been identified.<br>
bool ScannedRemattable;<br>
<br>
+ /// DeadRemats - The saved instructions which are already dead after
+ /// rematerialization but not deleted yet -- deletion is done in postOptimization.
+ SmallPtrSet<MachineInstr *, 32> *DeadRemats;<br>
+<br>
/// Remattable - Values defined by remattable instructions as identified by<br>
/// tii.isTriviallyReMaterializable().<br>
SmallPtrSet<const VNInfo*,4> Remattable;<br>
@@ -116,13 +120,16 @@ public:<br>
/// @param vrm Map of virtual registers to physical registers for this<br>
/// function. If NULL, no virtual register map updates will<br>
/// be done. This could be the case if called before Regalloc.<br>
+ /// @param deadRemats The collection of all the instructions that define an
+ /// original reg and are dead after remat.
LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<unsigned> &newRegs,<br>
MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,<br>
- Delegate *delegate = nullptr)<br>
+ Delegate *delegate = nullptr,<br>
+ SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)<br>
: Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),<br>
- VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()),<br>
- TheDelegate(delegate), FirstNew(newRegs.size()),<br>
- ScannedRemattable(false) {<br>
+ VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate),<br>
+ FirstNew(newRegs.size()), ScannedRemattable(false),<br>
+ DeadRemats(deadRemats) {<br>
MRI.setDelegate(this);<br>
}<br>
<br>
@@ -142,6 +149,16 @@ public:<br>
bool empty() const { return size() == 0; }<br>
unsigned get(unsigned idx) const { return NewRegs[idx+FirstNew]; }<br>
<br>
+ /// pop_back - It allows LiveRangeEdit users to drop new registers.<br>
+ /// The context is that when an original def instruction of a register
+ /// becomes dead after rematerialization, we still want to keep it for
+ /// later rematerializations. We save the def instruction in DeadRemats
+ /// and replace the original dst register with a new dummy register so
+ /// that the live range of the original dst register can be shrunk
+ /// normally. We don't want to allocate a phys register for the dummy
+ /// register, so we drop it from the NewRegs set.
+ void pop_back() { NewRegs.pop_back(); }<br>
+<br>
ArrayRef<unsigned> regs() const {<br>
return makeArrayRef(NewRegs).slice(FirstNew);<br>
}<br>
@@ -175,15 +192,15 @@ public:<br>
/// Remat - Information needed to rematerialize at a specific location.<br>
struct Remat {<br>
VNInfo *ParentVNI; // parent_'s value at the remat location.<br>
- MachineInstr *OrigMI; // Instruction defining ParentVNI.<br>
+ MachineInstr *OrigMI; // Instruction defining OrigVNI. It contains the<br>
+ // real expr for remat.<br>
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}<br>
};<br>
<br>
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at<br>
/// UseIdx. It is assumed that parent_.getVNINfoAt(UseIdx) == ParentVNI.<br>
/// When cheapAsAMove is set, only cheap remats are allowed.<br>
- bool canRematerializeAt(Remat &RM,<br>
- SlotIndex UseIdx,<br>
+ bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx,<br>
bool cheapAsAMove);<br>
<br>
/// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an<br>
@@ -208,6 +225,12 @@ public:<br>
return Rematted.count(ParentVNI);<br>
}<br>
<br>
+ void markDeadRemat(MachineInstr *inst) {<br>
+ // DeadRemats is an optional field.<br>
+ if (DeadRemats)<br>
+ DeadRemats->insert(inst);<br>
+ }<br>
+<br>
/// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try<br>
/// to erase it from LIS.<br>
void eraseVirtReg(unsigned Reg);<br>
@@ -218,8 +241,11 @@ public:<br>
/// RegsBeingSpilled lists registers currently being spilled by the register<br>
/// allocator. These registers should not be split into new intervals<br>
/// as currently those new intervals are not guaranteed to spill.<br>
- void eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,<br>
- ArrayRef<unsigned> RegsBeingSpilled = None);<br>
+ /// NoSplit indicates this func is used after the iterations of selectOrSplit<br>
+ /// where registers should not be split into new intervals.<br>
+ void eliminateDeadDefs(SmallVectorImpl<MachineInstr *> &Dead,<br>
+ ArrayRef<unsigned> RegsBeingSpilled = None,<br>
+ bool NoSplit = false);<br>
<br>
/// calculateRegClassAndHint - Recompute register class and hint for each new<br>
/// register.<br>
<br>
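(Editorial note: the deadRemats constructor argument, pop_back(), and markDeadRemat() added above are exercised together by the LiveRangeEdit.cpp hunk later in this mail; abridged, the usage pattern there is:)

  // Inside LiveRangeEdit::eliminateDeadDef, when MI is a still-needed
  // original def (see the LiveRangeEdit.cpp diff below):
  unsigned NewDest = createFrom(Dest); // dummy vreg so the old range can shrink
  pop_back();                          // don't hand the dummy to the allocator
  markDeadRemat(MI);                   // real deletion deferred to postOptimization
  MI->substituteRegister(Dest, NewDest, 0, TRI);
  MI->getOperand(0).setIsDead(false);
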
Modified: llvm/trunk/lib/CodeGen/InlineSpiller.cpp<br>
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/InlineSpiller.cpp?rev=265309&r1=265308&r2=265309&view=diff
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/InlineSpiller.cpp (original)<br>
+++ llvm/trunk/lib/CodeGen/InlineSpiller.cpp Mon Apr 4 11:42:40 2016<br>
@@ -48,13 +48,78 @@ STATISTIC(NumReloadsRemoved, "Number of<br>
STATISTIC(NumFolded, "Number of folded stack accesses");<br>
STATISTIC(NumFoldedLoads, "Number of folded loads");<br>
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");<br>
-STATISTIC(NumOmitReloadSpill, "Number of omitted spills of reloads");<br>
-STATISTIC(NumHoists, "Number of hoisted spills");<br>
<br>
static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,<br>
cl::desc("Disable inline spill hoisting"));<br>
<br>
namespace {<br>
+class HoistSpillHelper {<br>
+ MachineFunction &MF;<br>
+ LiveIntervals &LIS;<br>
+ LiveStacks &LSS;<br>
+ AliasAnalysis *AA;<br>
+ MachineDominatorTree &MDT;<br>
+ MachineLoopInfo &Loops;<br>
+ VirtRegMap &VRM;<br>
+ MachineFrameInfo &MFI;<br>
+ MachineRegisterInfo &MRI;<br>
+ const TargetInstrInfo &TII;<br>
+ const TargetRegisterInfo &TRI;<br>
+ const MachineBlockFrequencyInfo &MBFI;<br>
+<br>
+ // Map from StackSlot to its original register.<br>
+ DenseMap<int, unsigned> StackSlotToReg;<br>
+ // Map from pair of (StackSlot and Original VNI) to a set of spills which<br>
+ // have the same stackslot and have equal values defined by Original VNI.<br>
+ // These spills are mergeable and are hoist candidates.
+ typedef DenseMap<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>><br>
+ MergeableSpillsMap;<br>
+ MergeableSpillsMap MergeableSpills;<br>
+<br>
+ /// This is the map from original register to a set containing all its<br>
+ /// siblings. To hoist a spill to another BB, we need to find a live
+ /// sibling there and use it as the source of the new spill.<br>
+ DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;<br>
+<br>
+ bool isSpillCandBB(unsigned OrigReg, VNInfo &OrigVNI, MachineBasicBlock &BB,<br>
+ unsigned &LiveReg);<br>
+<br>
+ void rmRedundantSpills(<br>
+ SmallPtrSet<MachineInstr *, 16> &Spills,<br>
+ SmallVectorImpl<MachineInstr *> &SpillsToRm,<br>
+ DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);<br>
+<br>
+ void getVisitOrders(<br>
+ MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,<br>
+ SmallVectorImpl<MachineDomTreeNode *> &Orders,<br>
+ SmallVectorImpl<MachineInstr *> &SpillsToRm,<br>
+ DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,<br>
+ DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);<br>
+<br>
+ void runHoistSpills(unsigned OrigReg, VNInfo &OrigVNI,<br>
+ SmallPtrSet<MachineInstr *, 16> &Spills,<br>
+ SmallVectorImpl<MachineInstr *> &SpillsToRm,<br>
+ DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);<br>
+<br>
+public:<br>
+ HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,<br>
+ VirtRegMap &vrm)<br>
+ : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),<br>
+ LSS(pass.getAnalysis<LiveStacks>()),<br>
+ AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),<br>
+ MDT(pass.getAnalysis<MachineDominatorTree>()),<br>
+ Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),<br>
+ MFI(*mf.getFrameInfo()), MRI(mf.getRegInfo()),<br>
+ TII(*mf.getSubtarget().getInstrInfo()),<br>
+ TRI(*mf.getSubtarget().getRegisterInfo()),<br>
+ MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}<br>
+<br>
+ void addToMergeableSpills(MachineInstr *Spill, int StackSlot,<br>
+ unsigned Original);<br>
+ bool rmFromMergeableSpills(MachineInstr *Spill, int StackSlot);<br>
+ void hoistAllSpills(LiveRangeEdit &Edit);<br>
+};<br>
+<br>
class InlineSpiller : public Spiller {<br>
MachineFunction &MF;<br>
LiveIntervals &LIS;<br>
@@ -85,56 +150,12 @@ class InlineSpiller : public Spiller {<br>
// Values that failed to remat at some point.<br>
SmallPtrSet<VNInfo*, 8> UsedValues;<br>
<br>
-public:<br>
- // Information about a value that was defined by a copy from a sibling<br>
- // register.<br>
- struct SibValueInfo {<br>
- // True when all reaching defs were reloads: No spill is necessary.<br>
- bool AllDefsAreReloads;<br>
-<br>
- // True when value is defined by an original PHI not from splitting.<br>
- bool DefByOrigPHI;<br>
-<br>
- // True when the COPY defining this value killed its source.<br>
- bool KillsSource;<br>
-<br>
- // The preferred register to spill.<br>
- unsigned SpillReg;<br>
-<br>
- // The value of SpillReg that should be spilled.<br>
- VNInfo *SpillVNI;<br>
-<br>
- // The block where SpillVNI should be spilled. Currently, this must be the<br>
- // block containing SpillVNI->def.<br>
- MachineBasicBlock *SpillMBB;<br>
-<br>
- // A defining instruction that is not a sibling copy or a reload, or NULL.<br>
- // This can be used as a template for rematerialization.<br>
- MachineInstr *DefMI;<br>
-<br>
- // List of values that depend on this one. These values are actually the<br>
- // same, but live range splitting has placed them in different registers,<br>
- // or SSA update needed to insert PHI-defs to preserve SSA form. This is<br>
- // copies of the current value and phi-kills. Usually only phi-kills cause<br>
- // more than one dependent value.<br>
- TinyPtrVector<VNInfo*> Deps;<br>
-<br>
- SibValueInfo(unsigned Reg, VNInfo *VNI)<br>
- : AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),<br>
- SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}<br>
-<br>
- // Returns true when a def has been found.<br>
- bool hasDef() const { return DefByOrigPHI || DefMI; }<br>
- };<br>
-<br>
-private:<br>
- // Values in RegsToSpill defined by sibling copies.<br>
- typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;<br>
- SibValueMap SibValues;<br>
-<br>
// Dead defs generated during spilling.<br>
SmallVector<MachineInstr*, 8> DeadDefs;<br>
<br>
+ // Object records spills information and does the hoisting.<br>
+ HoistSpillHelper HSpiller;<br>
+<br>
~InlineSpiller() override {}<br>
<br>
public:<br>
@@ -147,9 +168,11 @@ public:<br>
MFI(*mf.getFrameInfo()), MRI(mf.getRegInfo()),<br>
TII(*mf.getSubtarget().getInstrInfo()),<br>
TRI(*mf.getSubtarget().getRegisterInfo()),<br>
- MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}<br>
+ MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),<br>
+ HSpiller(pass, mf, vrm) {}<br>
<br>
void spill(LiveRangeEdit &) override;<br>
+ void postOptimization();<br>
<br>
private:<br>
bool isSnippet(const LiveInterval &SnipLI);<br>
@@ -161,11 +184,7 @@ private:<br>
}<br>
<br>
bool isSibling(unsigned Reg);<br>
- MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);<br>
- void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);<br>
- void analyzeSiblingValues();<br>
-<br>
- bool hoistSpill(LiveInterval &SpillLI, MachineInstr &CopyMI);<br>
+ bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);<br>
void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);<br>
<br>
void markValueUsed(LiveInterval*, VNInfo*);<br>
@@ -297,417 +316,43 @@ void InlineSpiller::collectRegsToSpill()<br>
}<br>
}<br>
<br>
-<br>
-//===----------------------------------------------------------------------===//<br>
-// Sibling Values<br>
-//===----------------------------------------------------------------------===//<br>
-<br>
-// After live range splitting, some values to be spilled may be defined by<br>
-// copies from sibling registers. We trace the sibling copies back to the<br>
-// original value if it still exists. We need it for rematerialization.<br>
-//<br>
-// Even when the value can't be rematerialized, we still want to determine if<br>
-// the value has already been spilled, or we may want to hoist the spill from a<br>
-// loop.<br>
-<br>
bool InlineSpiller::isSibling(unsigned Reg) {<br>
return TargetRegisterInfo::isVirtualRegister(Reg) &&<br>
VRM.getOriginal(Reg) == Original;<br>
}<br>
<br>
-#ifndef NDEBUG<br>
-static raw_ostream &operator<<(raw_ostream &OS,<br>
- const InlineSpiller::SibValueInfo &SVI) {<br>
- OS << "spill " << PrintReg(SVI.SpillReg) << ':'<br>
- << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def;<br>
- if (SVI.SpillMBB)<br>
- OS << " in BB#" << SVI.SpillMBB->getNumber();<br>
- if (SVI.AllDefsAreReloads)<br>
- OS << " all-reloads";<br>
- if (SVI.DefByOrigPHI)<br>
- OS << " orig-phi";<br>
- if (SVI.KillsSource)<br>
- OS << " kill";<br>
- OS << " deps[";<br>
- for (VNInfo *Dep : SVI.Deps)<br>
- OS << ' ' << Dep->id << '@' << Dep->def;<br>
- OS << " ]";<br>
- if (SVI.DefMI)<br>
- OS << " def: " << *SVI.DefMI;<br>
- else<br>
- OS << '\n';<br>
- return OS;<br>
-}<br>
-#endif<br>
-<br>
-/// propagateSiblingValue - Propagate the value in SVI to dependents if it is<br>
-/// known. Otherwise remember the dependency for later.<br>
+/// It is beneficial to spill to an earlier place in the same BB in the
+/// following case:
+/// There is an alternative def earlier in the same MBB.
+/// Hoist the spill as far as possible in SpillMBB. This can ease<br>
+/// register pressure:<br>
///<br>
-/// @param SVIIter SibValues entry to propagate.<br>
-/// @param VNI Dependent value, or NULL to propagate to all saved dependents.<br>
-void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,<br>
- VNInfo *VNI) {<br>
- SibValueMap::value_type *SVI = &*SVIIter;<br>
-<br>
- // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.<br>
- TinyPtrVector<VNInfo*> FirstDeps;<br>
- if (VNI) {<br>
- FirstDeps.push_back(VNI);<br>
- SVI->second.Deps.push_back(VNI);<br>
- }<br>
-<br>
- // Has the value been completely determined yet? If not, defer propagation.<br>
- if (!SVI->second.hasDef())<br>
- return;<br>
-<br>
- // Work list of values to propagate.<br>
- SmallSetVector<SibValueMap::value_type *, 8> WorkList;<br>
- WorkList.insert(SVI);<br>
-<br>
- do {<br>
- SVI = WorkList.pop_back_val();<br>
- TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;<br>
- VNI = nullptr;<br>
-<br>
- SibValueInfo &SV = SVI->second;<br>
- if (!SV.SpillMBB)<br>
- SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);<br>
-<br>
- DEBUG(dbgs() << " prop to " << Deps->size() << ": "<br>
- << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);<br>
-<br>
- assert(SV.hasDef() && "Propagating undefined value");<br>
-<br>
- // Should this value be propagated as a preferred spill candidate? We don't<br>
- // propagate values of registers that are about to spill.<br>
- bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);<br>
- unsigned SpillDepth = ~0u;<br>
-<br>
- for (VNInfo *Dep : *Deps) {<br>
- SibValueMap::iterator DepSVI = SibValues.find(Dep);<br>
- assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");<br>
- SibValueInfo &DepSV = DepSVI->second;<br>
- if (!DepSV.SpillMBB)<br>
- DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);<br>
-<br>
- bool Changed = false;<br>
-<br>
- // Propagate defining instruction.<br>
- if (!DepSV.hasDef()) {<br>
- Changed = true;<br>
- DepSV.DefMI = SV.DefMI;<br>
- DepSV.DefByOrigPHI = SV.DefByOrigPHI;<br>
- }<br>
-<br>
- // Propagate AllDefsAreReloads. For PHI values, this computes an AND of<br>
- // all predecessors.<br>
- if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {<br>
- Changed = true;<br>
- DepSV.AllDefsAreReloads = false;<br>
- }<br>
-<br>
- // Propagate best spill value.<br>
- if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {<br>
- if (SV.SpillMBB == DepSV.SpillMBB) {<br>
- // DepSV is in the same block. Hoist when dominated.<br>
- if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {<br>
- // This is an alternative def earlier in the same MBB.<br>
- // Hoist the spill as far as possible in SpillMBB. This can ease<br>
- // register pressure:<br>
- //<br>
- // x = def<br>
- // y = use x<br>
- // s = copy x<br>
- //<br>
- // Hoisting the spill of s to immediately after the def removes the<br>
- // interference between x and y:<br>
- //<br>
- // x = def<br>
- // spill x<br>
- // y = use x<kill><br>
- //<br>
- // This hoist only helps when the DepSV copy kills its source.<br>
- Changed = true;<br>
- DepSV.SpillReg = SV.SpillReg;<br>
- DepSV.SpillVNI = SV.SpillVNI;<br>
- DepSV.SpillMBB = SV.SpillMBB;<br>
- }<br>
- } else {<br>
- // DepSV is in a different block.<br>
- if (SpillDepth == ~0u)<br>
- SpillDepth = Loops.getLoopDepth(SV.SpillMBB);<br>
-<br>
- // Also hoist spills to blocks with smaller loop depth, but make sure<br>
- // that the new value dominates. Non-phi dependents are always<br>
- // dominated, phis need checking.<br>
-<br>
- const BranchProbability MarginProb(4, 5); // 80%<br>
- // Hoist a spill to outer loop if there are multiple dependents (it<br>
- // can be beneficial if more than one dependents are hoisted) or<br>
- // if DepSV (the hoisting source) is hotter than SV (the hoisting<br>
- // destination) (we add a 80% margin to bias a little towards<br>
- // loop depth).<br>
- bool HoistCondition =<br>
- (MBFI.getBlockFreq(DepSV.SpillMBB) >=<br>
- (MBFI.getBlockFreq(SV.SpillMBB) * MarginProb)) ||<br>
- Deps->size() > 1;<br>
-<br>
- if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&<br>
- HoistCondition &&<br>
- (!DepSVI->first->isPHIDef() ||<br>
- MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {<br>
- Changed = true;<br>
- DepSV.SpillReg = SV.SpillReg;<br>
- DepSV.SpillVNI = SV.SpillVNI;<br>
- DepSV.SpillMBB = SV.SpillMBB;<br>
- }<br>
- }<br>
- }<br>
-<br>
- if (!Changed)<br>
- continue;<br>
-<br>
- // Something changed in DepSVI. Propagate to dependents.<br>
- WorkList.insert(&*DepSVI);<br>
-<br>
- DEBUG(dbgs() << " update " << DepSVI->first->id << '@'<br>
- << DepSVI->first->def << " to:\t" << DepSV);<br>
- }<br>
- } while (!WorkList.empty());<br>
-}<br>
-<br>
-/// traceSiblingValue - Trace a value that is about to be spilled back to the<br>
-/// real defining instructions by looking through sibling copies. Always stay<br>
-/// within the range of OrigVNI so the registers are known to carry the same<br>
-/// value.<br>
+/// x = def<br>
+/// y = use x<br>
+/// s = copy x<br>
///<br>
-/// Determine if the value is defined by all reloads, so spilling isn't<br>
-/// necessary - the value is already in the stack slot.<br>
+/// Hoisting the spill of s to immediately after the def removes the<br>
+/// interference between x and y:<br>
///<br>
-/// Return a defining instruction that may be a candidate for rematerialization.<br>
+/// x = def<br>
+/// spill x<br>
+/// y = use x<kill><br>
///<br>
-MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,<br>
- VNInfo *OrigVNI) {<br>
- // Check if a cached value already exists.<br>
- SibValueMap::iterator SVI;<br>
- bool Inserted;<br>
- std::tie(SVI, Inserted) =<br>
- SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));<br>
- if (!Inserted) {<br>
- DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'<br>
- << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);<br>
- return SVI->second.DefMI;<br>
- }<br>
-<br>
- DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'<br>
- << UseVNI->id << '@' << UseVNI->def << '\n');<br>
-<br>
- // List of (Reg, VNI) that have been inserted into SibValues, but need to be<br>
- // processed.<br>
- SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;<br>
- WorkList.push_back(std::make_pair(UseReg, UseVNI));<br>
-<br>
- LiveInterval &OrigLI = LIS.getInterval(Original);<br>
- do {<br>
- unsigned Reg;<br>
- VNInfo *VNI;<br>
- std::tie(Reg, VNI) = WorkList.pop_back_val();<br>
- DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def<br>
- << ":\t");<br>
-<br>
- // First check if this value has already been computed.<br>
- SVI = SibValues.find(VNI);<br>
- assert(SVI != SibValues.end() && "Missing SibValues entry");<br>
-<br>
- // Trace through PHI-defs created by live range splitting.<br>
- if (VNI->isPHIDef()) {<br>
- // Stop at original PHIs. We don't know the value at the<br>
- // predecessors. Look up the VNInfo for the current definition<br>
- // in OrigLI, to properly determine whether or not this phi was<br>
- // added by splitting.<br>
- if (VNI->def == OrigLI.getVNInfoAt(VNI->def)->def) {<br>
- DEBUG(dbgs() << "orig phi value\n");<br>
- SVI->second.DefByOrigPHI = true;<br>
- SVI->second.AllDefsAreReloads = false;<br>
- propagateSiblingValue(SVI);<br>
- continue;<br>
- }<br>
-<br>
- // This is a PHI inserted by live range splitting. We could trace the<br>
- // live-out value from predecessor blocks, but that search can be very<br>
- // expensive if there are many predecessors and many more PHIs as<br>
- // generated by tail-dup when it sees an indirectbr. Instead, look at<br>
- // all the non-PHI defs that have the same value as OrigVNI. They must<br>
- // jointly dominate VNI->def. This is not optimal since VNI may actually<br>
- // be jointly dominated by a smaller subset of defs, so there is a change<br>
- // we will miss a AllDefsAreReloads optimization.<br>
-<br>
- // Separate all values dominated by OrigVNI into PHIs and non-PHIs.<br>
- SmallVector<VNInfo*, 8> PHIs, NonPHIs;<br>
- LiveInterval &LI = LIS.getInterval(Reg);<br>
-<br>
- for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();<br>
- VI != VE; ++VI) {<br>
- VNInfo *VNI2 = *VI;<br>
- if (VNI2->isUnused())<br>
- continue;<br>
- if (!OrigLI.containsOneValue() &&<br>
- OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)<br>
- continue;<br>
- if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)<br>
- PHIs.push_back(VNI2);<br>
- else<br>
- NonPHIs.push_back(VNI2);<br>
- }<br>
- DEBUG(dbgs() << "split phi value, checking " << PHIs.size()<br>
- << " phi-defs, and " << NonPHIs.size()<br>
- << " non-phi/orig defs\n");<br>
-<br>
- // Create entries for all the PHIs. Don't add them to the worklist, we<br>
- // are processing all of them in one go here.<br>
- for (VNInfo *PHI : PHIs)<br>
- SibValues.insert(std::make_pair(PHI, SibValueInfo(Reg, PHI)));<br>
-<br>
- // Add every PHI as a dependent of all the non-PHIs.<br>
- for (VNInfo *NonPHI : NonPHIs) {<br>
- // Known value? Try an insertion.<br>
- std::tie(SVI, Inserted) =<br>
- SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));<br>
- // Add all the PHIs as dependents of NonPHI.<br>
- SVI->second.Deps.insert(SVI->second.Deps.end(), PHIs.begin(),<br>
- PHIs.end());<br>
- // This is the first time we see NonPHI, add it to the worklist.<br>
- if (Inserted)<br>
- WorkList.push_back(std::make_pair(Reg, NonPHI));<br>
- else<br>
- // Propagate to all inserted PHIs, not just VNI.<br>
- propagateSiblingValue(SVI);<br>
- }<br>
-<br>
- // Next work list item.<br>
- continue;<br>
- }<br>
-<br>
- MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);<br>
- assert(MI && "Missing def");<br>
-<br>
- // Trace through sibling copies.<br>
- if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {<br>
- if (isSibling(SrcReg)) {<br>
- LiveInterval &SrcLI = LIS.getInterval(SrcReg);<br>
- LiveQueryResult SrcQ = SrcLI.Query(VNI->def);<br>
- assert(SrcQ.valueIn() && "Copy from non-existing value");<br>
- // Check if this COPY kills its source.<br>
- SVI->second.KillsSource = SrcQ.isKill();<br>
- VNInfo *SrcVNI = SrcQ.valueIn();<br>
- DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'<br>
- << SrcVNI->id << '@' << SrcVNI->def<br>
- << " kill=" << unsigned(SVI->second.KillsSource) << '\n');<br>
- // Known sibling source value? Try an insertion.<br>
- std::tie(SVI, Inserted) = SibValues.insert(<br>
- std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));<br>
- // This is the first time we see Src, add it to the worklist.<br>
- if (Inserted)<br>
- WorkList.push_back(std::make_pair(SrcReg, SrcVNI));<br>
- propagateSiblingValue(SVI, VNI);<br>
- // Next work list item.<br>
- continue;<br>
- }<br>
- }<br>
-<br>
- // Track reachable reloads.<br>
- SVI->second.DefMI = MI;<br>
- SVI->second.SpillMBB = MI->getParent();<br>
- int FI;<br>
- if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {<br>
- DEBUG(dbgs() << "reload\n");<br>
- propagateSiblingValue(SVI);<br>
- // Next work list item.<br>
- continue;<br>
- }<br>
-<br>
- // Potential remat candidate.<br>
- DEBUG(dbgs() << "def " << *MI);<br>
- SVI->second.AllDefsAreReloads = false;<br>
- propagateSiblingValue(SVI);<br>
- } while (!WorkList.empty());<br>
-<br>
- // Look up the value we were looking for. We already did this lookup at the<br>
- // top of the function, but SibValues may have been invalidated.<br>
- SVI = SibValues.find(UseVNI);<br>
- assert(SVI != SibValues.end() && "Didn't compute requested info");<br>
- DEBUG(dbgs() << " traced to:\t" << SVI->second);<br>
- return SVI->second.DefMI;<br>
-}<br>
-<br>
-/// analyzeSiblingValues - Trace values defined by sibling copies back to<br>
-/// something that isn't a sibling copy.<br>
+/// This hoist only helps when the copy kills its source.<br>
///<br>
-/// Keep track of values that may be rematerializable.<br>
-void InlineSpiller::analyzeSiblingValues() {<br>
- SibValues.clear();<br>
-<br>
- // No siblings at all?<br>
- if (Edit->getReg() == Original)<br>
- return;<br>
-<br>
- LiveInterval &OrigLI = LIS.getInterval(Original);<br>
- for (unsigned Reg : RegsToSpill) {<br>
- LiveInterval &LI = LIS.getInterval(Reg);<br>
- for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),<br>
- VE = LI.vni_end(); VI != VE; ++VI) {<br>
- VNInfo *VNI = *VI;<br>
- if (VNI->isUnused())<br>
- continue;<br>
- MachineInstr *DefMI = nullptr;<br>
- if (!VNI->isPHIDef()) {<br>
- DefMI = LIS.getInstructionFromIndex(VNI->def);<br>
- assert(DefMI && "No defining instruction");<br>
- }<br>
- // Check possible sibling copies.<br>
- if (VNI->isPHIDef() || DefMI->isCopy()) {<br>
- VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);<br>
- assert(OrigVNI && "Def outside original live range");<br>
- if (OrigVNI->def != VNI->def)<br>
- DefMI = traceSiblingValue(Reg, VNI, OrigVNI);<br>
- }<br>
- if (DefMI && Edit->checkRematerializable(VNI, DefMI, AA)) {<br>
- DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'<br>
- << VNI->def << " may remat from " << *DefMI);<br>
- }<br>
- }<br>
- }<br>
-}<br>
-<br>
-/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert<br>
-/// a spill at a better location.<br>
-bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr &CopyMI) {<br>
+bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,<br>
+ MachineInstr &CopyMI) {<br>
SlotIndex Idx = LIS.getInstructionIndex(CopyMI);<br>
VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());<br>
assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");<br>
- SibValueMap::iterator I = SibValues.find(VNI);<br>
- if (I == SibValues.end())<br>
- return false;<br>
-<br>
- const SibValueInfo &SVI = I->second;<br>
-<br>
- // Let the normal folding code deal with the boring case.<br>
- if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)<br>
- return false;<br>
-<br>
- // SpillReg may have been deleted by remat and DCE.<br>
- if (!LIS.hasInterval(SVI.SpillReg)) {<br>
- DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');<br>
- SibValues.erase(I);<br>
- return false;<br>
- }<br>
<br>
- LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);<br>
- if (!SibLI.containsValue(SVI.SpillVNI)) {<br>
- DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');<br>
- SibValues.erase(I);<br>
+ unsigned SrcReg = CopyMI.getOperand(1).getReg();<br>
+ LiveInterval &SrcLI = LIS.getInterval(SrcReg);<br>
+ VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);<br>
+ LiveQueryResult SrcQ = SrcLI.Query(Idx);<br>
+ MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);<br>
+ if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())<br>
return false;<br>
- }<br>
<br>
// Conservatively extend the stack slot range to the range of the original<br>
// value. We may be able to do better with stack slot coloring by being more<br>
@@ -719,35 +364,29 @@ bool InlineSpiller::hoistSpill(LiveInter<br>
DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "<br>
<< *StackInt << '\n');<br>
<br>
- // Already spilled everywhere.<br>
- if (SVI.AllDefsAreReloads) {<br>
- DEBUG(dbgs() << "\tno spill needed: " << SVI);<br>
- ++NumOmitReloadSpill;<br>
- return true;<br>
- }<br>
- // We are going to spill SVI.SpillVNI immediately after its def, so clear out<br>
+ // We are going to spill SrcVNI immediately after its def, so clear out<br>
// any later spills of the same value.<br>
- eliminateRedundantSpills(SibLI, SVI.SpillVNI);<br>
+ eliminateRedundantSpills(SrcLI, SrcVNI);<br>
<br>
- MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);<br>
+ MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);<br>
MachineBasicBlock::iterator MII;<br>
- if (SVI.SpillVNI->isPHIDef())<br>
+ if (SrcVNI->isPHIDef())<br>
MII = MBB->SkipPHIsAndLabels(MBB->begin());<br>
else {<br>
- MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);<br>
+ MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);<br>
assert(DefMI && "Defining instruction disappeared");<br>
MII = DefMI;<br>
++MII;<br>
}<br>
// Insert spill without kill flag immediately after def.<br>
- TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,<br>
- MRI.getRegClass(SVI.SpillReg), &TRI);<br>
+ TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,<br>
+ MRI.getRegClass(SrcReg), &TRI);<br>
--MII; // Point to store instruction.<br>
LIS.InsertMachineInstrInMaps(*MII);<br>
- DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);<br>
+ DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);<br>
<br>
+ HSpiller.addToMergeableSpills(&(*MII), StackSlot, Original);<br>
++NumSpills;<br>
- ++NumHoists;<br>
return true;<br>
}<br>
<br>
@@ -805,7 +444,8 @@ void InlineSpiller::eliminateRedundantSp<br>
MI->setDesc(TII.get(TargetOpcode::KILL));<br>
DeadDefs.push_back(MI);<br>
++NumSpillsRemoved;<br>
- --NumSpills;<br>
+ if (HSpiller.rmFromMergeableSpills(MI, StackSlot))<br>
+ --NumSpills;<br>
}<br>
}<br>
} while (!WorkList.empty());<br>
@@ -876,12 +516,12 @@ bool InlineSpiller::reMaterializeFor(Liv<br>
if (SnippetCopies.count(&MI))<br>
return false;<br>
<br>
- // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.<br>
+ LiveInterval &OrigLI = LIS.getInterval(Original);<br>
+ VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);<br>
LiveRangeEdit::Remat RM(ParentVNI);<br>
- SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);<br>
- if (SibI != SibValues.end())<br>
- RM.OrigMI = SibI->second.DefMI;<br>
- if (!Edit->canRematerializeAt(RM, UseIdx, false)) {<br>
+ RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);<br>
+<br>
+ if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {<br>
markValueUsed(&VirtReg, ParentVNI);<br>
DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);<br>
return false;<br>
@@ -931,7 +571,6 @@ bool InlineSpiller::reMaterializeFor(Liv<br>
/// reMaterializeAll - Try to rematerialize as many uses as possible,<br>
/// and trim the live ranges after.<br>
void InlineSpiller::reMaterializeAll() {<br>
- // analyzeSiblingValues has already tested all relevant defining instructions.<br>
if (!Edit->anyRematerializable(AA))<br>
return;<br>
<br>
@@ -1017,6 +656,9 @@ bool InlineSpiller::coalesceStackAccess(<br>
if (InstrReg != Reg || FI != StackSlot)<br>
return false;<br>
<br>
+ if (!IsLoad)<br>
+ HSpiller.rmFromMergeableSpills(MI, StackSlot);<br>
+<br>
DEBUG(dbgs() << "Coalescing stack access: " << *MI);<br>
LIS.RemoveMachineInstrFromMaps(*MI);<br>
MI->eraseFromParent();<br>
@@ -1141,6 +783,9 @@ foldMemoryOperand(ArrayRef<std::pair<Mac<br>
LIS.removePhysRegDefAt(Reg, Idx);<br>
}<br>
<br>
+ int FI;<br>
+ if (TII.isStoreToStackSlot(MI, FI) && HSpiller.rmFromMergeableSpills(MI, FI))<br>
+ --NumSpills;<br>
LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);<br>
MI->eraseFromParent();<br>
<br>
@@ -1166,9 +811,10 @@ foldMemoryOperand(ArrayRef<std::pair<Mac<br>
<br>
if (!WasCopy)<br>
++NumFolded;<br>
- else if (Ops.front().second == 0)<br>
+ else if (Ops.front().second == 0) {<br>
++NumSpills;<br>
- else<br>
+ HSpiller.addToMergeableSpills(FoldMI, StackSlot, Original);<br>
+ } else<br>
++NumReloads;<br>
return true;<br>
}<br>
@@ -1203,6 +849,7 @@ void InlineSpiller::insertSpill(unsigned<br>
DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,<br>
"spill"));<br>
++NumSpills;<br>
+ HSpiller.addToMergeableSpills(std::next(MI), StackSlot, Original);<br>
}<br>
<br>
/// spillAroundUses - insert spill code around each use of Reg.<br>
@@ -1266,8 +913,7 @@ void InlineSpiller::spillAroundUses(unsi<br>
continue;<br>
}<br>
if (RI.Writes) {<br>
- // Hoist the spill of a sib-reg copy.<br>
- if (hoistSpill(OldLI, *MI)) {<br>
+ if (hoistSpillInsideBB(OldLI, *MI)) {<br>
// This COPY is now dead, the value is already in the stack slot.<br>
MI->getOperand(0).setIsDead();<br>
DeadDefs.push_back(MI);<br>
@@ -1380,7 +1026,6 @@ void InlineSpiller::spill(LiveRangeEdit<br>
assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");<br>
<br>
collectRegsToSpill();<br>
- analyzeSiblingValues();<br>
reMaterializeAll();<br>
<br>
// Remat may handle everything.<br>
@@ -1389,3 +1034,394 @@ void InlineSpiller::spill(LiveRangeEdit<br>
<br>
Edit->calculateRegClassAndHint(MF, Loops, MBFI);<br>
}<br>
+<br>
+/// Optimizations after all the reg selections and spills are done.<br>
+///<br>
+void InlineSpiller::postOptimization() {<br>
+ SmallVector<unsigned, 4> NewVRegs;<br>
+ LiveRangeEdit LRE(nullptr, NewVRegs, MF, LIS, &VRM, nullptr);<br>
+ HSpiller.hoistAllSpills(LRE);<br>
+ assert(NewVRegs.size() == 0 &&<br>
+ "No new vregs should be generated in hoistAllSpills");<br>
+}<br>
+<br>
+/// When a spill is inserted, add the spill to MergeableSpills map.<br>
+///<br>
+void HoistSpillHelper::addToMergeableSpills(MachineInstr *Spill, int StackSlot,<br>
+ unsigned Original) {<br>
+ StackSlotToReg[StackSlot] = Original;<br>
+ SlotIndex Idx = LIS.getInstructionIndex(*Spill);<br>
+ VNInfo *OrigVNI = LIS.getInterval(Original).getVNInfoAt(Idx.getRegSlot());<br>
+ std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);<br>
+ MergeableSpills[MIdx].insert(Spill);<br>
+}<br>
+<br>
+/// When a spill is removed, remove the spill from MergeableSpills map.<br>
+/// Return true if the spill is removed successfully.<br>
+///<br>
+bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr *Spill,<br>
+ int StackSlot) {<br>
+ int Original = StackSlotToReg[StackSlot];<br>
+ if (!Original)<br>
+ return false;<br>
+ SlotIndex Idx = LIS.getInstructionIndex(*Spill);<br>
+ VNInfo *OrigVNI = LIS.getInterval(Original).getVNInfoAt(Idx.getRegSlot());<br>
+ std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);<br>
+ return MergeableSpills[MIdx].erase(Spill);<br>
+}<br>
+<br>
+/// Check BB to see if it is a possible target BB to place a hoisted spill,<br>
+/// i.e., there should be a live sibling of OrigReg at the insert point.
+///<br>
+bool HoistSpillHelper::isSpillCandBB(unsigned OrigReg, VNInfo &OrigVNI,<br>
+ MachineBasicBlock &BB, unsigned &LiveReg) {<br>
+ SlotIndex Idx;<br>
+ MachineBasicBlock::iterator MI = BB.getFirstTerminator();<br>
+ if (MI != BB.end())<br>
+ Idx = LIS.getInstructionIndex(*MI);<br>
+ else<br>
+ Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();<br>
+ SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];<br>
+ assert((LIS.getInterval(OrigReg)).getVNInfoAt(Idx) == &OrigVNI &&<br>
+ "Unexpected VNI");<br>
+<br>
+ for (auto const SibReg : Siblings) {<br>
+ LiveInterval &LI = LIS.getInterval(SibReg);<br>
+ VNInfo *VNI = LI.getVNInfoAt(Idx);<br>
+ if (VNI) {<br>
+ LiveReg = SibReg;<br>
+ return true;<br>
+ }<br>
+ }<br>
+ return false;<br>
+}<br>
+<br>
+/// Remove redundant spills in the same BB. Save those redundant spills in
+/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.<br>
+///<br>
+void HoistSpillHelper::rmRedundantSpills(<br>
+ SmallPtrSet<MachineInstr *, 16> &Spills,<br>
+ SmallVectorImpl<MachineInstr *> &SpillsToRm,<br>
+ DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {<br>
+ // For each spill seen, check SpillBBToSpill[] and see if its BB already has
+ // another spill inside. If a BB contains more than one spill, only keep the<br>
+ // earlier spill with smaller SlotIndex.<br>
+ for (const auto CurrentSpill : Spills) {<br>
+ MachineBasicBlock *Block = CurrentSpill->getParent();<br>
+ MachineDomTreeNode *Node = MDT.DT->getNode(Block);<br>
+ MachineInstr *PrevSpill = SpillBBToSpill[Node];<br>
+ if (PrevSpill) {<br>
+ SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);<br>
+ SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);<br>
+ MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;<br>
+ MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;<br>
+ SpillsToRm.push_back(SpillToRm);<br>
+ SpillBBToSpill[MDT.DT->getNode(Block)] = SpillToKeep;<br>
+ } else {<br>
+ SpillBBToSpill[MDT.DT->getNode(Block)] = CurrentSpill;<br>
+ }<br>
+ }<br>
+ for (const auto SpillToRm : SpillsToRm)<br>
+ Spills.erase(SpillToRm);<br>
+}<br>
+<br>
+/// Starting from \p Root find a top-down traversal order of the dominator<br>
+/// tree to visit all basic blocks containing the elements of \p Spills.<br>
+/// Redundant spills will be found and put into \p SpillsToRm at the same<br>
+/// time. \p SpillBBToSpill will be populated as part of the process and<br>
+/// maps a basic block to the first store occurring in the basic block.<br>
+/// \post SpillsToRm.union(Spills@post) == Spills@pre<br>
+///<br>
+void HoistSpillHelper::getVisitOrders(<br>
+ MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,<br>
+ SmallVectorImpl<MachineDomTreeNode *> &Orders,<br>
+ SmallVectorImpl<MachineInstr *> &SpillsToRm,<br>
+ DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,<br>
+ DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {<br>
+ // The set contains all the possible BB nodes to which we may hoist<br>
+ // original spills.<br>
+ SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;<br>
+ // Save the BB nodes on the path from the first BB node containing<br>
+ // a non-redundant spill to the Root node.
+ SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;<br>
+ // All the spills to be hoisted must originate from a single def instruction<br>
+ // to the OrigReg. It means the def instruction should dominate all the spills<br>
+ // to be hoisted. We choose the BB where the def instruction is located as<br>
+ // the Root.<br>
+ MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();<br>
+ // For every node on the dominator tree with a spill, walk up the dominator
+ // tree towards the Root node until it is reached. If there is another node
+ // containing a spill in the middle of the path, the previously seen spill
+ // will be redundant and the node containing it will be removed. All the
+ // nodes on the path starting from the first node with a non-redundant
+ // spill to the Root
+ // node will be added to the WorkSet, which will contain all the possible<br>
+ // locations where spills may be hoisted to after the loop below is done.<br>
+ for (const auto Spill : Spills) {<br>
+ MachineBasicBlock *Block = Spill->getParent();<br>
+ MachineDomTreeNode *Node = MDT[Block];<br>
+ MachineInstr *SpillToRm = nullptr;<br>
+ while (Node != RootIDomNode) {<br>
+ // If Node dominates Block, and it already contains a spill, the spill in<br>
+ // Block will be redundant.
+ if (Node != MDT[Block] && SpillBBToSpill[Node]) {<br>
+ SpillToRm = SpillBBToSpill[MDT[Block]];<br>
+ break;<br>
+ /// If we see the Node already in WorkSet, the path from the Node to<br>
+ /// the Root node must already be traversed by another spill.<br>
+ /// Then no need to repeat.<br>
+ } else if (WorkSet.count(Node)) {<br>
+ break;<br>
+ } else {<br>
+ NodesOnPath.insert(Node);<br>
+ }<br>
+ Node = Node->getIDom();<br>
+ }<br>
+ if (SpillToRm) {<br>
+ SpillsToRm.push_back(SpillToRm);<br>
+ } else {<br>
+ // Add a BB containing the original spills to SpillsToKeep -- i.e.,<br>
+ // set the initial status before hoisting starts. The value for BBs
+ // containing original spills is set to 0, in order to discriminate
+ // them from BBs containing hoisted spills, which will be inserted
+ // into SpillsToKeep later during hoisting.
+ SpillsToKeep[MDT[Block]] = 0;<br>
+ WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());<br>
+ }<br>
+ NodesOnPath.clear();<br>
+ }<br>
+<br>
+ // Sort the nodes in WorkSet in top-down order and save the nodes<br>
+ // in Orders. Orders will be used for hoisting in runHoistSpills.<br>
+ unsigned idx = 0;<br>
+ Orders.push_back(MDT.DT->getNode(Root));<br>
+ do {<br>
+ MachineDomTreeNode *Node = Orders[idx++];<br>
+ const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();<br>
+ unsigned NumChildren = Children.size();<br>
+ for (unsigned i = 0; i != NumChildren; ++i) {<br>
+ MachineDomTreeNode *Child = Children[i];<br>
+ if (WorkSet.count(Child))<br>
+ Orders.push_back(Child);<br>
+ }<br>
+ } while (idx != Orders.size());<br>
+ assert(Orders.size() == WorkSet.size() &&<br>
+ "Orders have different size with WorkSet");<br>
+<br>
+#ifndef NDEBUG<br>
+ DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");<br>
+ SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();<br>
+ for (; RIt != Orders.rend(); RIt++)<br>
+ DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");<br>
+ DEBUG(dbgs() << "\n");<br>
+#endif<br>
+}<br>
+<br>
+/// Try to hoist spills according to BB hotness. The spills to be removed will
+/// be saved in \p SpillsToRm. The spills to be inserted will be saved in<br>
+/// \p SpillsToIns.<br>
+///<br>
+void HoistSpillHelper::runHoistSpills(<br>
+ unsigned OrigReg, VNInfo &OrigVNI, SmallPtrSet<MachineInstr *, 16> &Spills,<br>
+ SmallVectorImpl<MachineInstr *> &SpillsToRm,<br>
+ DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {<br>
+ // Visit order of dominator tree nodes.<br>
+ SmallVector<MachineDomTreeNode *, 32> Orders;<br>
+ // SpillsToKeep contains all the nodes where spills are to be inserted<br>
+ // during hoisting. If the spill to be inserted is an original spill<br>
+ // (not a hoisted one), the value of the map entry is 0. If the spill<br>
+ // is a hoisted spill, the value of the map entry is the VReg to be used<br>
+ // as the source of the spill.<br>
+ DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;<br>
+ // Map from BB to the first spill inside of it.<br>
+ DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;<br>
+<br>
+ rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);<br>
+<br>
+ MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);<br>
+ getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,<br>
+ SpillBBToSpill);<br>
+<br>
+ // SpillsInSubTree keeps the map from a dom tree node to a pair of<br>
+ // nodes set and the cost of all the spills inside those nodes.<br>
+ // The nodes set are the locations where spills are to be inserted<br>
+ // in the subtree of current node.<br>
+ typedef std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency><br>
+ NodesCostPair;<br>
+ DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;<br>
+ // Iterate Orders set in reverse order, which will be a bottom-up order<br>
+ // in the dominator tree. Once we visit a dom tree node, we know its<br>
+ // children have already been visited and the spill locations in the<br>
+ // subtrees of all the children have been determined.<br>
+ SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();<br>
+ for (; RIt != Orders.rend(); RIt++) {<br>
+ MachineBasicBlock *Block = (*RIt)->getBlock();<br>
+ SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =<br>
+ SpillsInSubTreeMap[*RIt].first;<br>
+ // Total spill costs inside the sub tree.<br>
+ BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;<br>
+<br>
+ // If Block contains an original spill, simply continue.<br>
+ if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {<br>
+ SpillsInSubTree.insert(*RIt);<br>
+ SubTreeCost = MBFI.getBlockFreq(Block);<br>
+ continue;<br>
+ }<br>
+<br>
+ // Collect spills in subtree of current node (*RIt) to<br>
+ // SpillsInSubTree.<br>
+ const std::vector<MachineDomTreeNode *> &Children = (*RIt)->getChildren();<br>
+ unsigned NumChildren = Children.size();<br>
+ for (unsigned i = 0; i != NumChildren; ++i) {<br>
+ MachineDomTreeNode *Child = Children[i];<br>
+ SpillsInSubTree.insert(SpillsInSubTreeMap[Child].first.begin(),<br>
+ SpillsInSubTreeMap[Child].first.end());<br>
+ SubTreeCost += SpillsInSubTreeMap[Child].second;<br>
+ SpillsInSubTreeMap.erase(Child);<br>
+ }<br>
+<br>
+ // No spills in subtree, simply continue.<br>
+ if (SpillsInSubTree.empty())<br>
+ continue;<br>
+<br>
+ // Check whether Block is a possible candidate to insert spill.<br>
+ unsigned LiveReg = 0;<br>
+ if (!isSpillCandBB(OrigReg, OrigVNI, *Block, LiveReg))<br>
+ continue;<br>
+<br>
+ // If there are multiple spills that could be merged, bias a little<br>
+ // to hoist the spill.<br>
+ BranchProbability MarginProb = (SpillsInSubTree.size() > 1)<br>
+ ? BranchProbability(9, 10)<br>
+ : BranchProbability(1, 1);<br>
+ if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {<br>
+ // Hoist: Move spills to current Block.<br>
+ for (const auto SpillBB : SpillsInSubTree) {<br>
+ // When SpillBB is a BB that contains an original spill, insert the spill
+ // into SpillsToRm.
+ if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&<br>
+ !SpillsToKeep[SpillBB]) {<br>
+ MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];<br>
+ SpillsToRm.push_back(SpillToRm);<br>
+ }<br>
+ // SpillBB will not contain spill anymore, remove it from SpillsToKeep.<br>
+ SpillsToKeep.erase(SpillBB);<br>
+ }<br>
+ // Current Block is the BB containing the new hoisted spill. Add it to<br>
+ // SpillsToKeep. LiveReg is the source of the new spill.<br>
+ SpillsToKeep[*RIt] = LiveReg;<br>
+ DEBUG({<br>
+ dbgs() << "spills in BB: ";<br>
+ for (const auto Rspill : SpillsInSubTree)<br>
+ dbgs() << Rspill->getBlock()->getNumber() << " ";<br>
+ dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()<br>
+ << "\n";<br>
+ });<br>
+ SpillsInSubTree.clear();<br>
+ SpillsInSubTree.insert(*RIt);<br>
+ SubTreeCost = MBFI.getBlockFreq(Block);<br>
+ }<br>
+ }<br>
+ // For spills in SpillsToKeep with LiveReg set (i.e., not original spill),<br>
+ // save them to SpillsToIns.<br>
+ for (const auto Ent : SpillsToKeep) {<br>
+ if (Ent.second)<br>
+ SpillsToIns[Ent.first->getBlock()] = Ent.second;<br>
+ }<br>
+}<br>
+<br>
+/// For spills with equal values, remove redundant spills and hoist the rest
+/// to colder spots.
+///<br>
+/// Spills with equal values will be collected into the same set in<br>
+/// MergeableSpills when a spill is inserted. These equal spills originate
+/// from the same define instruction and are dominated by that instruction.
+/// Before hoisting all the equal spills, redundant spills inside the same
+/// BB are first marked to be deleted. Then, starting from the remaining
+/// spills, walk up the dominator tree towards the Root node where the
+/// define instruction is located, mark the dominated spills to be deleted
+/// along the way, and collect the BB nodes on the path from non-dominated
+/// spills to the define instruction into a WorkSet. The nodes in WorkSet
+/// are the candidate places where we consider hoisting the spills. We
+/// iterate the WorkSet in bottom-up
+/// order, and for each node, we will decide whether to hoist spills inside<br>
+/// its subtree to that node. In this way, we can get benefit locally even if<br>
+/// hoisting all the equal spills to one cold place is impossible.<br>
+///<br>
+void HoistSpillHelper::hoistAllSpills(LiveRangeEdit &Edit) {<br>
+ // Save the mapping between stackslot and its original reg.<br>
+ DenseMap<int, unsigned> SlotToOrigReg;<br>
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {<br>
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);<br>
+ int Slot = VRM.getStackSlot(Reg);<br>
+ if (Slot != VirtRegMap::NO_STACK_SLOT)<br>
+ SlotToOrigReg[Slot] = VRM.getOriginal(Reg);<br>
+ unsigned Original = VRM.getPreSplitReg(Reg);<br>
+ if (!MRI.def_empty(Reg))<br>
+ Virt2SiblingsMap[Original].insert(Reg);<br>
+ }<br>
+<br>
+ // Each entry in MergeableSpills contains a spill set with equal values.<br>
+ for (auto &Ent : MergeableSpills) {<br>
+ int Slot = Ent.first.first;<br>
+ unsigned OrigReg = SlotToOrigReg[Slot];<br>
+ VNInfo *OrigVNI = Ent.first.second;<br>
+ SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;<br>
+ if (Ent.second.empty())<br>
+ continue;<br>
+<br>
+ DEBUG({<br>
+ dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"<br>
+ << "Equal spills in BB: ";<br>
+ for (const auto spill : EqValSpills)<br>
+ dbgs() << spill->getParent()->getNumber() << " ";<br>
+ dbgs() << "\n";<br>
+ });<br>
+<br>
+ // SpillsToRm is the spill set to be removed from EqValSpills.<br>
+ SmallVector<MachineInstr *, 16> SpillsToRm;<br>
+ // SpillsToIns is the spill set to be newly inserted after hoisting.<br>
+ DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;<br>
+<br>
+ runHoistSpills(OrigReg, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);<br>
+<br>
+ DEBUG({<br>
+ dbgs() << "Finally inserted spills in BB: ";<br>
+ for (const auto Ispill : SpillsToIns)<br>
+ dbgs() << Ispill.first->getNumber() << " ";<br>
+ dbgs() << "\nFinally removed spills in BB: ";<br>
+ for (const auto Rspill : SpillsToRm)<br>
+ dbgs() << Rspill->getParent()->getNumber() << " ";<br>
+ dbgs() << "\n";<br>
+ });<br>
+<br>
+ // Stack live range update.<br>
+ LiveInterval &StackIntvl = LSS.getInterval(Slot);<br>
+ if (!SpillsToIns.empty() || !SpillsToRm.empty()) {<br>
+ LiveInterval &OrigLI = LIS.getInterval(OrigReg);<br>
+ StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,<br>
+ StackIntvl.getValNumInfo(0));<br>
+ }<br>
+<br>
+ // Insert hoisted spills.<br>
+ for (auto const Insert : SpillsToIns) {<br>
+ MachineBasicBlock *BB = Insert.first;<br>
+ unsigned LiveReg = Insert.second;<br>
+ MachineBasicBlock::iterator MI = BB->getFirstTerminator();<br>
+ TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,<br>
+ MRI.getRegClass(LiveReg), &TRI);<br>
+ LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);<br>
+ ++NumSpills;<br>
+ }<br>
+<br>
+ // Remove redundant spills or change them to dead instructions.
+ NumSpills -= SpillsToRm.size();<br>
+ for (auto const RMEnt : SpillsToRm) {<br>
+ RMEnt->setDesc(TII.get(TargetOpcode::KILL));<br>
+ for (unsigned i = RMEnt->getNumOperands(); i; --i) {<br>
+ MachineOperand &MO = RMEnt->getOperand(i - 1);<br>
+ if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())<br>
+ RMEnt->RemoveOperand(i - 1);<br>
+ }<br>
+ }<br>
+ Edit.eliminateDeadDefs(SpillsToRm, None, true);<br>
+ }<br>
+}<br>
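(Editorial note on the clang error reported at the top of this mail: the InlineSpiller::postOptimization() declared above overrides the virtual Spiller::postOptimization() from Spiller.h, so a likely follow-up fix -- a guess, not part of this patch -- is simply to mark the declaration with the override keyword:)

  // lib/CodeGen/InlineSpiller.cpp, class InlineSpiller:
  void spill(LiveRangeEdit &) override;
  void postOptimization() override; // silences -Winconsistent-missing-override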
<br>
Modified: llvm/trunk/lib/CodeGen/LiveRangeEdit.cpp<br>
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveRangeEdit.cpp?rev=265309&r1=265308&r2=265309&view=diff
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/LiveRangeEdit.cpp (original)<br>
+++ llvm/trunk/lib/CodeGen/LiveRangeEdit.cpp Mon Apr 4 11:42:40 2016<br>
@@ -63,10 +63,13 @@ void LiveRangeEdit::scanRemattable(Alias<br>
for (VNInfo *VNI : getParent().valnos) {<br>
if (VNI->isUnused())<br>
continue;<br>
- MachineInstr *DefMI = LIS.getInstructionFromIndex(VNI->def);<br>
+ unsigned Original = VRM->getOriginal(getReg());<br>
+ LiveInterval &OrigLI = LIS.getInterval(Original);<br>
+ VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);<br>
+ MachineInstr *DefMI = LIS.getInstructionFromIndex(OrigVNI->def);<br>
if (!DefMI)<br>
continue;<br>
- checkRematerializable(VNI, DefMI, aa);<br>
+ checkRematerializable(OrigVNI, DefMI, aa);<br>
}<br>
ScannedRemattable = true;<br>
}<br>
@@ -113,24 +116,18 @@ bool LiveRangeEdit::allUsesAvailableAt(c<br>
return true;<br>
}<br>
<br>
-bool LiveRangeEdit::canRematerializeAt(Remat &RM,<br>
- SlotIndex UseIdx,<br>
- bool cheapAsAMove) {<br>
+bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI,<br>
+ SlotIndex UseIdx, bool cheapAsAMove) {<br>
assert(ScannedRemattable && "Call anyRematerializable first");<br>
<br>
// Use scanRemattable info.<br>
- if (!Remattable.count(RM.ParentVNI))<br>
+ if (!Remattable.count(OrigVNI))<br>
return false;<br>
<br>
// No defining instruction provided.<br>
SlotIndex DefIdx;<br>
- if (RM.OrigMI)<br>
- DefIdx = LIS.getInstructionIndex(*RM.OrigMI);<br>
- else {<br>
- DefIdx = RM.ParentVNI->def;<br>
- RM.OrigMI = LIS.getInstructionFromIndex(DefIdx);<br>
- assert(RM.OrigMI && "No defining instruction for remattable value");<br>
- }<br>
+ assert(RM.OrigMI && "No defining instruction for remattable value");<br>
+ DefIdx = LIS.getInstructionIndex(*RM.OrigMI);<br>
<br>
// If only cheap remats were requested, bail out early.<br>
if (cheapAsAMove && !TII.isAsCheapAsAMove(RM.OrigMI))<br>
@@ -261,6 +258,15 @@ void LiveRangeEdit::eliminateDeadDef(Mac<br>
// Collect virtual registers to be erased after MI is gone.<br>
SmallVector<unsigned, 8> RegsToErase;<br>
bool ReadsPhysRegs = false;<br>
+ bool isOrigDef = false;<br>
+ unsigned Dest;<br>
+ if (VRM && MI->getOperand(0).isReg()) {<br>
+ Dest = MI->getOperand(0).getReg();<br>
+ unsigned Original = VRM->getOriginal(Dest);<br>
+ LiveInterval &OrigLI = LIS.getInterval(Original);<br>
+ VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);<br>
+ isOrigDef = SlotIndex::isSameInstr(OrigVNI->def, Idx);<br>
+ }<br>
<br>
// Check for live intervals that may shrink<br>
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),<br>
@@ -314,11 +320,24 @@ void LiveRangeEdit::eliminateDeadDef(Mac<br>
}<br>
DEBUG(dbgs() << "Converted physregs to:\t" << *MI);<br>
} else {<br>
- if (TheDelegate)<br>
- TheDelegate->LRE_WillEraseInstruction(MI);<br>
- LIS.RemoveMachineInstrFromMaps(*MI);<br>
- MI->eraseFromParent();<br>
- ++NumDCEDeleted;<br>
+ // If the dest of MI is an original reg, don't delete the inst. Replace<br>
+ // the dest with a new reg, keep the inst for remat of other siblings.<br>
+ // The inst is saved in LiveRangeEdit::DeadRemats and will be deleted<br>
+ // after all the allocations of the func are done.<br>
+ if (isOrigDef) {<br>
+ unsigned NewDest = createFrom(Dest);<br>
+ pop_back();<br>
+ markDeadRemat(MI);<br>
+ const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();<br>
+ MI->substituteRegister(Dest, NewDest, 0, TRI);<br>
+ MI->getOperand(0).setIsDead(false);<br>
+ } else {<br>
+ if (TheDelegate)<br>
+ TheDelegate->LRE_WillEraseInstruction(MI);<br>
+ LIS.RemoveMachineInstrFromMaps(*MI);<br>
+ MI->eraseFromParent();<br>
+ ++NumDCEDeleted;<br>
+ }<br>
}<br>
<br>
// Erase any virtregs that are now empty and unused. There may be <undef><br>
@@ -332,8 +351,9 @@ void LiveRangeEdit::eliminateDeadDef(Mac<br>
}<br>
}<br>
<br>
-void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,<br>
- ArrayRef<unsigned> RegsBeingSpilled) {<br>
+void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr *> &Dead,<br>
+ ArrayRef<unsigned> RegsBeingSpilled,<br>
+ bool NoSplit) {<br>
ToShrinkSet ToShrink;<br>
<br>
for (;;) {<br>
@@ -355,6 +375,9 @@ void LiveRangeEdit::eliminateDeadDefs(Sm<br>
if (!LIS.shrinkToUses(LI, &Dead))<br>
continue;<br>
<br>
+ if (NoSplit)<br>
+ continue;<br>
+<br>
// Don't create new intervals for a register being spilled.<br>
// The new intervals would have to be spilled anyway so its not worth it.<br>
// Also they currently aren't spilled so creating them and not spilling<br>
<br>
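The LiveRangeEdit changes above have two halves. scanRemattable and canRematerializeAt now look up the defining instruction of the original register's value (RM.OrigMI is supplied by the caller), and eliminateDeadDef no longer erases a dead instruction that defines the original register: it gives it a fresh destination, records it via markDeadRemat, and leaves the erase to the post-allocation sweep over DeadRemats shown in the allocator hunks below. A minimal stand-alone sketch of that defer-the-delete pattern follows; Instr and Allocator are toy stand-ins invented for illustration, not MachineInstr or LiveRangeEdit.<br>
<br>
// Toy sketch: defer deleting dead defs that may still be needed as remat sources.<br>
#include <memory><br>
#include <set><br>
#include <string><br>
#include <vector><br>
<br>
struct Instr {          // stand-in for MachineInstr<br>
  std::string Dest;     // register defined by the instruction<br>
  std::string Expr;     // expression it computes (what remat would copy)<br>
};<br>
<br>
struct Allocator {<br>
  std::vector<std::unique_ptr<Instr>> Code;<br>
  std::set<Instr *> DeadRemats;   // erase postponed until postOptimization()<br>
  int NextTmp = 0;<br>
<br>
  // DCE found I dead. If it defines the original value, keep the defining<br>
  // expression around for later rematerialization: detach it onto a fresh<br>
  // register and remember it instead of erasing it right away.<br>
  void eliminateDeadDef(Instr *I, bool IsOrigDef) {<br>
    if (IsOrigDef) {<br>
      I->Dest = "%dead" + std::to_string(NextTmp++);<br>
      DeadRemats.insert(I);<br>
    } else {<br>
      eraseNow(I);<br>
    }<br>
  }<br>
<br>
  void postOptimization() {       // run once, after all allocations are done<br>
    for (Instr *I : DeadRemats)<br>
      eraseNow(I);<br>
    DeadRemats.clear();<br>
  }<br>
<br>
  void eraseNow(Instr *I) {<br>
    for (auto It = Code.begin(); It != Code.end(); ++It)<br>
      if (It->get() == I) { Code.erase(It); return; }<br>
  }<br>
};<br>
<br>
int main() {<br>
  Allocator RA;<br>
  RA.Code.push_back(std::make_unique<Instr>(Instr{"%orig", "add %a, %b"}));<br>
  Instr *Def = RA.Code.back().get();<br>
  RA.eliminateDeadDef(Def, /*IsOrigDef=*/true);  // kept, dest renamed<br>
  RA.postOptimization();                         // actually erased here<br>
  return RA.Code.empty() ? 0 : 1;<br>
}<br>
<br>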
Modified: llvm/trunk/lib/CodeGen/RegAllocBase.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocBase.cpp?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocBase.cpp?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/RegAllocBase.cpp (original)<br>
+++ llvm/trunk/lib/CodeGen/RegAllocBase.cpp Mon Apr 4 11:42:40 2016<br>
@@ -153,3 +153,12 @@ void RegAllocBase::allocatePhysRegs() {<br>
}<br>
}<br>
}<br>
+<br>
+void RegAllocBase::postOptimization() {<br>
+ spiller().postOptimization();<br>
+ for (auto DeadInst : DeadRemats) {<br>
+ LIS->RemoveMachineInstrFromMaps(*DeadInst);<br>
+ DeadInst->eraseFromParent();<br>
+ }<br>
+ DeadRemats.clear();<br>
+}<br>
<br>
Modified: llvm/trunk/lib/CodeGen/RegAllocBase.h<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocBase.h?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocBase.h?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/RegAllocBase.h (original)<br>
+++ llvm/trunk/lib/CodeGen/RegAllocBase.h Mon Apr 4 11:42:40 2016<br>
@@ -65,6 +65,12 @@ protected:<br>
LiveRegMatrix *Matrix;<br>
RegisterClassInfo RegClassInfo;<br>
<br>
+ /// An instruction which defines an original reg, and whose defs are already<br>
+ /// all dead after remat, is saved in DeadRemats. The deletion of such an<br>
+ /// inst is postponed till all the allocations are done, so its remat expr<br>
+ /// is always available for the remat of all the siblings of the original reg.<br>
+ SmallPtrSet<MachineInstr *, 32> DeadRemats;<br>
+<br>
RegAllocBase()<br>
: TRI(nullptr), MRI(nullptr), VRM(nullptr), LIS(nullptr), Matrix(nullptr) {}<br>
<br>
@@ -77,6 +83,10 @@ protected:<br>
// physical register assignments.<br>
void allocatePhysRegs();<br>
<br>
+ // Includes spiller post optimization and removal of dead defs left because of<br>
+ // rematerialization.<br>
+ virtual void postOptimization();<br>
+<br>
// Get a temporary reference to a Spiller instance.<br>
virtual Spiller &spiller() = 0;<br>
<br>
<br>
Modified: llvm/trunk/lib/CodeGen/RegAllocBasic.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocBasic.cpp?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocBasic.cpp?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/RegAllocBasic.cpp (original)<br>
+++ llvm/trunk/lib/CodeGen/RegAllocBasic.cpp Mon Apr 4 11:42:40 2016<br>
@@ -199,7 +199,7 @@ bool RABasic::spillInterferences(LiveInt<br>
Matrix->unassign(Spill);<br>
<br>
// Spill the extracted interval.<br>
- LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM);<br>
+ LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM, nullptr, &DeadRemats);<br>
spiller().spill(LRE);<br>
}<br>
return true;<br>
@@ -258,7 +258,7 @@ unsigned RABasic::selectOrSplit(LiveInte<br>
DEBUG(dbgs() << "spilling: " << VirtReg << '\n');<br>
if (!VirtReg.isSpillable())<br>
return ~0u;<br>
- LiveRangeEdit LRE(&VirtReg, SplitVRegs, *MF, *LIS, VRM);<br>
+ LiveRangeEdit LRE(&VirtReg, SplitVRegs, *MF, *LIS, VRM, nullptr, &DeadRemats);<br>
spiller().spill(LRE);<br>
<br>
// The live virtual register requesting allocation was spilled, so tell<br>
@@ -283,6 +283,7 @@ bool RABasic::runOnMachineFunction(Machi<br>
SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));<br>
<br>
allocatePhysRegs();<br>
+ postOptimization();<br>
<br>
// Diagnostic output before rewriting<br>
DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *VRM << "\n");<br>
<br>
Modified: llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp (original)<br>
+++ llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp Mon Apr 4 11:42:40 2016<br>
@@ -12,7 +12,6 @@<br>
//<br>
//===----------------------------------------------------------------------===//<br>
<br>
-#include "llvm/CodeGen/Passes.h"<br>
#include "AllocationOrder.h"<br>
#include "InterferenceCache.h"<br>
#include "LiveDebugVariables.h"<br>
@@ -33,6 +32,7 @@<br>
#include "llvm/CodeGen/MachineFunctionPass.h"<br>
#include "llvm/CodeGen/MachineLoopInfo.h"<br>
#include "llvm/CodeGen/MachineRegisterInfo.h"<br>
+#include "llvm/CodeGen/Passes.h"<br>
#include "llvm/CodeGen/RegAllocRegistry.h"<br>
#include "llvm/CodeGen/RegisterClassInfo.h"<br>
#include "llvm/CodeGen/VirtRegMap.h"<br>
@@ -44,6 +44,7 @@<br>
#include "llvm/Support/ErrorHandling.h"<br>
#include "llvm/Support/Timer.h"<br>
#include "llvm/Support/raw_ostream.h"<br>
+#include "llvm/Target/TargetInstrInfo.h"<br>
#include "llvm/Target/TargetSubtargetInfo.h"<br>
#include <queue><br>
<br>
@@ -55,14 +56,14 @@ STATISTIC(NumGlobalSplits, "Number of sp<br>
STATISTIC(NumLocalSplits, "Number of split local live ranges");<br>
STATISTIC(NumEvicted, "Number of interferences evicted");<br>
<br>
-static cl::opt<SplitEditor::ComplementSpillMode><br>
-SplitSpillMode("split-spill-mode", cl::Hidden,<br>
- cl::desc("Spill mode for splitting live ranges"),<br>
- cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),<br>
- clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),<br>
- clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),<br>
- clEnumValEnd),<br>
- cl::init(SplitEditor::SM_Partition));<br>
+static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(<br>
+ "split-spill-mode", cl::Hidden,<br>
+ cl::desc("Spill mode for splitting live ranges"),<br>
+ cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),<br>
+ clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),<br>
+ clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),<br>
+ clEnumValEnd),<br>
+ cl::init(SplitEditor::SM_Speed));<br>
<br>
static cl::opt<unsigned><br>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,<br>
@@ -1465,7 +1466,7 @@ unsigned RAGreedy::doRegionSplit(LiveInt<br>
SmallVectorImpl<unsigned> &NewVRegs) {<br>
SmallVector<unsigned, 8> UsedCands;<br>
// Prepare split editor.<br>
- LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);<br>
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);<br>
SE->reset(LREdit, SplitSpillMode);<br>
<br>
// Assign all edge bundles to the preferred candidate, or NoCand.<br>
@@ -1513,7 +1514,7 @@ unsigned RAGreedy::tryBlockSplit(LiveInt<br>
assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");<br>
unsigned Reg = VirtReg.reg;<br>
bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));<br>
- LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);<br>
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);<br>
SE->reset(LREdit, SplitSpillMode);<br>
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();<br>
for (unsigned i = 0; i != UseBlocks.size(); ++i) {<br>
@@ -1585,7 +1586,7 @@ RAGreedy::tryInstructionSplit(LiveInterv<br>
<br>
// Always enable split spill mode, since we're effectively spilling to a<br>
// register.<br>
- LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);<br>
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);<br>
SE->reset(LREdit, SplitEditor::SM_Size);<br>
<br>
ArrayRef<SlotIndex> Uses = SA->getUseSlots();<br>
@@ -1908,7 +1909,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInt<br>
<< '-' << Uses[BestAfter] << ", " << BestDiff<br>
<< ", " << (BestAfter - BestBefore + 1) << " instrs\n");<br>
<br>
- LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);<br>
+ LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);<br>
SE->reset(LREdit);<br>
<br>
SE->openIntv();<br>
@@ -2551,7 +2552,7 @@ unsigned RAGreedy::selectOrSplitImpl(Liv<br>
NewVRegs.push_back(VirtReg.reg);<br>
} else {<br>
NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);<br>
- LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);<br>
+ LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);<br>
spiller().spill(LRE);<br>
setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);<br>
<br>
@@ -2609,6 +2610,8 @@ bool RAGreedy::runOnMachineFunction(Mach<br>
<br>
allocatePhysRegs();<br>
tryHintsRecoloring();<br>
+ postOptimization();<br>
+<br>
releaseMemory();<br>
return true;<br>
}<br>
<br>
Modified: llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp (original)<br>
+++ llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp Mon Apr 4 11:42:40 2016<br>
@@ -123,6 +123,12 @@ private:<br>
<br>
RegSet VRegsToAlloc, EmptyIntervalVRegs;<br>
<br>
+ /// An instruction which defines an original reg, and whose defs are already<br>
+ /// all dead after remat, is saved in DeadRemats. The deletion of such an<br>
+ /// inst is postponed till all the allocations are done, so its remat expr<br>
+ /// is always available for the remat of all the siblings of the original reg.<br>
+ SmallPtrSet<MachineInstr *, 32> DeadRemats;<br>
+<br>
/// \brief Finds the initial set of vreg intervals to allocate.<br>
void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS);<br>
<br>
@@ -146,6 +152,7 @@ private:<br>
void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS,<br>
VirtRegMap &VRM) const;<br>
<br>
+ void postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS);<br>
};<br>
<br>
char RegAllocPBQP::ID = 0;<br>
@@ -631,7 +638,8 @@ void RegAllocPBQP::spillVReg(unsigned VR<br>
VirtRegMap &VRM, Spiller &VRegSpiller) {<br>
<br>
VRegsToAlloc.erase(VReg);<br>
- LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM);<br>
+ LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM,<br>
+ nullptr, &DeadRemats);<br>
VRegSpiller.spill(LRE);<br>
<br>
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();<br>
@@ -713,6 +721,16 @@ void RegAllocPBQP::finalizeAlloc(Machine<br>
}<br>
}<br>
<br>
+void RegAllocPBQP::postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS) {<br>
+ VRegSpiller.postOptimization();<br>
+ /// Remove dead defs because of rematerialization.<br>
+ for (auto DeadInst : DeadRemats) {<br>
+ LIS.RemoveMachineInstrFromMaps(*DeadInst);<br>
+ DeadInst->eraseFromParent();<br>
+ }<br>
+ DeadRemats.clear();<br>
+}<br>
+<br>
static inline float normalizePBQPSpillWeight(float UseDefFreq, unsigned Size,<br>
unsigned NumInstr) {<br>
// All intervals have a spill weight that is mostly proportional to the number<br>
@@ -798,6 +816,7 @@ bool RegAllocPBQP::runOnMachineFunction(<br>
<br>
// Finalise allocation, allocate empty ranges.<br>
finalizeAlloc(MF, LIS, VRM);<br>
+ postOptimization(*VRegSpiller, LIS);<br>
VRegsToAlloc.clear();<br>
EmptyIntervalVRegs.clear();<br>
<br>
<br>
Modified: llvm/trunk/lib/CodeGen/Spiller.h<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/Spiller.h?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/Spiller.h?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/Spiller.h (original)<br>
+++ llvm/trunk/lib/CodeGen/Spiller.h Mon Apr 4 11:42:40 2016<br>
@@ -16,6 +16,7 @@ namespace llvm {<br>
class MachineFunction;<br>
class MachineFunctionPass;<br>
class VirtRegMap;<br>
+ class LiveIntervals;<br>
<br>
/// Spiller interface.<br>
///<br>
@@ -28,7 +29,7 @@ namespace llvm {<br>
<br>
/// spill - Spill the LRE.getParent() live interval.<br>
virtual void spill(LiveRangeEdit &LRE) = 0;<br>
-<br>
+ virtual void postOptimization() {};<br>
};<br>
<br>
/// Create and return a spiller that will insert spill code directly instead<br>
@@ -36,7 +37,6 @@ namespace llvm {<br>
Spiller *createInlineSpiller(MachineFunctionPass &pass,<br>
MachineFunction &mf,<br>
VirtRegMap &vrm);<br>
-<br>
}<br>
<br>
#endif<br>
<br>
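The Spiller interface above gains a default-empty postOptimization() hook, which the allocators call once allocation has finished. A toy sketch of the shape of that extension follows; ToySpiller and ToyInlineSpiller are invented names, and the real InlineSpiller override performs the spill hoisting shown earlier rather than printing anything.<br>
<br>
// Toy sketch of the new virtual hook on the spiller interface.<br>
#include <iostream><br>
<br>
class ToySpiller {<br>
public:<br>
  virtual ~ToySpiller() = default;<br>
  virtual void spill(unsigned VirtReg) = 0;<br>
  virtual void postOptimization() {}   // default: nothing to do after allocation<br>
};<br>
<br>
class ToyInlineSpiller : public ToySpiller {<br>
  unsigned PendingSpills = 0;<br>
public:<br>
  void spill(unsigned VirtReg) override {<br>
    (void)VirtReg;                     // a real spiller would emit spill code here<br>
    ++PendingSpills;<br>
  }<br>
  void postOptimization() override {   // e.g. hoist/merge the spills recorded above<br>
    std::cout << "post-processing " << PendingSpills << " spills\n";<br>
    PendingSpills = 0;<br>
  }<br>
};<br>
<br>
int main() {<br>
  ToyInlineSpiller S;<br>
  S.spill(7);<br>
  S.postOptimization();   // the register allocator calls this once, at the end<br>
  return 0;<br>
}<br>
<br>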
Modified: llvm/trunk/lib/CodeGen/SplitKit.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SplitKit.cpp?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SplitKit.cpp?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/SplitKit.cpp (original)<br>
+++ llvm/trunk/lib/CodeGen/SplitKit.cpp Mon Apr 4 11:42:40 2016<br>
@@ -16,6 +16,7 @@<br>
#include "llvm/ADT/Statistic.h"<br>
#include "llvm/CodeGen/LiveIntervalAnalysis.h"<br>
#include "llvm/CodeGen/LiveRangeEdit.h"<br>
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"<br>
#include "llvm/CodeGen/MachineDominators.h"<br>
#include "llvm/CodeGen/MachineInstrBuilder.h"<br>
#include "llvm/CodeGen/MachineLoopInfo.h"<br>
@@ -430,8 +431,13 @@ VNInfo *SplitEditor::defFromParent(unsig<br>
bool Late = RegIdx != 0;<br>
<br>
// Attempt cheap-as-a-copy rematerialization.<br>
+ unsigned Original = VRM.getOriginal(Edit->get(RegIdx));<br>
+ LiveInterval &OrigLI = LIS.getInterval(Original);<br>
+ VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);<br>
LiveRangeEdit::Remat RM(ParentVNI);<br>
- if (Edit->canRematerializeAt(RM, UseIdx, true)) {<br>
+ RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);<br>
+<br>
+ if (Edit->canRematerializeAt(RM, OrigVNI, UseIdx, true)) {<br>
Def = Edit->rematerializeAt(MBB, I, LI->reg, RM, TRI, Late);<br>
++NumRemats;<br>
} else {<br>
@@ -716,7 +722,62 @@ SplitEditor::findShallowDominator(Machin<br>
}<br>
}<br>
<br>
-void SplitEditor::hoistCopiesForSize() {<br>
+void SplitEditor::computeRedundantBackCopies(<br>
+ DenseSet<unsigned> &NotToHoistSet, SmallVectorImpl<VNInfo *> &BackCopies) {<br>
+ LiveInterval *LI = &LIS.getInterval(Edit->get(0));<br>
+ LiveInterval *Parent = &Edit->getParent();<br>
+ SmallVector<SmallPtrSet<VNInfo *, 8>, 8> EqualVNs(Parent->getNumValNums());<br>
+ SmallPtrSet<VNInfo *, 8> DominatedVNIs;<br>
+<br>
+ // Aggregate VNIs having the same value as ParentVNI.<br>
+ for (VNInfo *VNI : LI->valnos) {<br>
+ if (VNI->isUnused())<br>
+ continue;<br>
+ VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);<br>
+ EqualVNs[ParentVNI->id].insert(VNI);<br>
+ }<br>
+<br>
+ // For VNI aggregation of each ParentVNI, collect dominated, i.e.,<br>
+ // redundant VNIs to BackCopies.<br>
+ for (unsigned i = 0, e = Parent->getNumValNums(); i != e; ++i) {<br>
+ VNInfo *ParentVNI = Parent->getValNumInfo(i);<br>
+ if (!NotToHoistSet.count(ParentVNI->id))<br>
+ continue;<br>
+ SmallPtrSetIterator<VNInfo *> It1 = EqualVNs[ParentVNI->id].begin();<br>
+ SmallPtrSetIterator<VNInfo *> It2 = It1;<br>
+ for (; It1 != EqualVNs[ParentVNI->id].end(); ++It1) {<br>
+ It2 = It1;<br>
+ for (++It2; It2 != EqualVNs[ParentVNI->id].end(); ++It2) {<br>
+ if (DominatedVNIs.count(*It1) || DominatedVNIs.count(*It2))<br>
+ continue;<br>
+<br>
+ MachineBasicBlock *MBB1 = LIS.getMBBFromIndex((*It1)->def);<br>
+ MachineBasicBlock *MBB2 = LIS.getMBBFromIndex((*It2)->def);<br>
+ if (MBB1 == MBB2) {<br>
+ DominatedVNIs.insert((*It1)->def < (*It2)->def ? (*It2) : (*It1));<br>
+ } else if (MDT.dominates(MBB1, MBB2)) {<br>
+ DominatedVNIs.insert(*It2);<br>
+ } else if (MDT.dominates(MBB2, MBB1)) {<br>
+ DominatedVNIs.insert(*It1);<br>
+ }<br>
+ }<br>
+ }<br>
+ if (!DominatedVNIs.empty()) {<br>
+ forceRecompute(0, ParentVNI);<br>
+ for (auto VNI : DominatedVNIs) {<br>
+ BackCopies.push_back(VNI);<br>
+ }<br>
+ DominatedVNIs.clear();<br>
+ }<br>
+ }<br>
+}<br>
+<br>
+/// For SM_Size mode, find a common dominator for all the back-copies for<br>
+/// the same ParentVNI and hoist the backcopies to the dominator BB.<br>
+/// For SM_Speed mode, if the common dominator is hot and it is not beneficial<br>
+/// to do the hoisting, simply remove the dominated backcopies for the same<br>
+/// ParentVNI.<br>
+void SplitEditor::hoistCopies() {<br>
// Get the complement interval, always RegIdx 0.<br>
LiveInterval *LI = &LIS.getInterval(Edit->get(0));<br>
LiveInterval *Parent = &Edit->getParent();<br>
@@ -725,6 +786,11 @@ void SplitEditor::hoistCopiesForSize() {<br>
// indexed by ParentVNI->id.<br>
typedef std::pair<MachineBasicBlock*, SlotIndex> DomPair;<br>
SmallVector<DomPair, 8> NearestDom(Parent->getNumValNums());<br>
+ // The total cost of all the back-copies for each ParentVNI.<br>
+ SmallVector<BlockFrequency, 8> Costs(Parent->getNumValNums());<br>
+ // The ParentVNI->id set for which hoisting back-copies are not beneficial<br>
+ // for Speed.<br>
+ DenseSet<unsigned> NotToHoistSet;<br>
<br>
// Find the nearest common dominator for parent values with multiple<br>
// back-copies. If a single back-copy dominates, put it in DomPair.second.<br>
@@ -740,6 +806,7 @@ void SplitEditor::hoistCopiesForSize() {<br>
continue;<br>
<br>
MachineBasicBlock *ValMBB = LIS.getMBBFromIndex(VNI->def);<br>
+<br>
DomPair &Dom = NearestDom[ParentVNI->id];<br>
<br>
// Keep directly defined parent values. This is either a PHI or an<br>
@@ -774,6 +841,7 @@ void SplitEditor::hoistCopiesForSize() {<br>
else if (Near != Dom.first)<br>
// None dominate. Hoist to common dominator, need new def.<br>
Dom = DomPair(Near, SlotIndex());<br>
+ Costs[ParentVNI->id] += MBFI.getBlockFreq(ValMBB);<br>
}<br>
<br>
DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@' << VNI->def<br>
@@ -792,6 +860,11 @@ void SplitEditor::hoistCopiesForSize() {<br>
MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(ParentVNI->def);<br>
// Get a less loopy dominator than Dom.first.<br>
Dom.first = findShallowDominator(Dom.first, DefMBB);<br>
+ if (SpillMode == SM_Speed &&<br>
+ MBFI.getBlockFreq(Dom.first) > Costs[ParentVNI->id]) {<br>
+ NotToHoistSet.insert(ParentVNI->id);<br>
+ continue;<br>
+ }<br>
SlotIndex Last = LIS.getMBBEndIdx(Dom.first).getPrevSlot();<br>
Dom.second =<br>
defFromParent(0, ParentVNI, Last, *Dom.first,<br>
@@ -806,11 +879,18 @@ void SplitEditor::hoistCopiesForSize() {<br>
continue;<br>
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(VNI->def);<br>
const DomPair &Dom = NearestDom[ParentVNI->id];<br>
- if (!Dom.first || Dom.second == VNI->def)<br>
+ if (!Dom.first || Dom.second == VNI->def ||<br>
+ NotToHoistSet.count(ParentVNI->id))<br>
continue;<br>
BackCopies.push_back(VNI);<br>
forceRecompute(0, ParentVNI);<br>
}<br>
+<br>
+ // If it is not beneficial to hoist all the BackCopies, simply remove<br>
+ // redundant BackCopies in speed mode.<br>
+ if (SpillMode == SM_Speed && !NotToHoistSet.empty())<br>
+ computeRedundantBackCopies(NotToHoistSet, BackCopies);<br>
+<br>
removeBackCopies(BackCopies);<br>
}<br>
<br>
@@ -1004,6 +1084,8 @@ void SplitEditor::deleteRematVictims() {<br>
// Dead defs end at the dead slot.<br>
if (S.end != S.valno->def.getDeadSlot())<br>
continue;<br>
+ if (S.valno->isPHIDef())<br>
+ continue;<br>
MachineInstr *MI = LIS.getInstructionFromIndex(S.valno->def);<br>
assert(MI && "Missing instruction for dead def");<br>
MI->addRegisterDead(LI->reg, &TRI);<br>
@@ -1048,10 +1130,9 @@ void SplitEditor::finish(SmallVectorImpl<br>
// Leave all back-copies as is.<br>
break;<br>
case SM_Size:<br>
- hoistCopiesForSize();<br>
- break;<br>
case SM_Speed:<br>
- llvm_unreachable("Spill mode 'speed' not implemented yet");<br>
+ // hoistCopies will behave differently between size and speed.<br>
+ hoistCopies();<br>
}<br>
<br>
// Transfer the simply mapped values, check if any are skipped.<br>
<br>
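hoistCopies above keeps the old SM_Size behaviour (hoist every back-copy of a parent value to a common dominator) and adds the SM_Speed policy: it accumulates the block frequencies of the back-copies in Costs, and if the dominator block alone is hotter than that sum it gives up on hoisting and instead lets computeRedundantBackCopies delete only the copies dominated by another copy of the same value. A small stand-alone sketch of that cost test follows; Block, Freq and the rest are toy stand-ins for MachineBasicBlock and MachineBlockFrequencyInfo, invented for illustration.<br>
<br>
// Toy model of the SM_Speed decision in hoistCopies(): hoist only if one copy<br>
// in the common dominator is no more expensive than the copies it replaces.<br>
#include <cassert><br>
#include <map><br>
#include <vector><br>
<br>
using Block = int;<br>
using Freq = double;   // stand-in for BlockFrequency<br>
<br>
struct HoistDecision {<br>
  bool DoHoist;<br>
  Block Dominator;     // meaningful only when DoHoist is true<br>
};<br>
<br>
static HoistDecision decide(Block CommonDom,<br>
                            const std::vector<Block> &BackCopyBlocks,<br>
                            const std::map<Block, Freq> &BlockFreq) {<br>
  Freq Cost = 0;<br>
  for (Block B : BackCopyBlocks)<br>
    Cost += BlockFreq.at(B);          // total cost of the existing back-copies<br>
  Freq DomFreq = BlockFreq.at(CommonDom);<br>
  return {DomFreq <= Cost, CommonDom};<br>
}<br>
<br>
int main() {<br>
  // The common dominator sits in a hot block: hoisting would execute the copy<br>
  // far more often than the two cold copies it replaces, so skip the hoist.<br>
  std::map<Block, Freq> BlockFreq = {{0, 100.0}, {1, 1.0}, {2, 1.0}};<br>
  HoistDecision D = decide(/*CommonDom=*/0, {1, 2}, BlockFreq);<br>
  assert(!D.DoHoist);<br>
  return 0;<br>
}<br>
<br>
The comparison mirrors the MBFI.getBlockFreq(Dom.first) > Costs[ParentVNI->id] check in the hunk above.<br>
<br>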
Modified: llvm/trunk/lib/CodeGen/SplitKit.h<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SplitKit.h?rev=265309&r1=265308&r2=265309&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SplitKit.h?rev=265309&r1=265308&r2=265309&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/CodeGen/SplitKit.h (original)<br>
+++ llvm/trunk/lib/CodeGen/SplitKit.h Mon Apr 4 11:42:40 2016<br>
@@ -18,6 +18,7 @@<br>
#include "LiveRangeCalc.h"<br>
#include "llvm/ADT/ArrayRef.h"<br>
#include "llvm/ADT/DenseMap.h"<br>
+#include "llvm/ADT/DenseSet.h"<br>
#include "llvm/ADT/IntervalMap.h"<br>
#include "llvm/ADT/SmallPtrSet.h"<br>
<br>
@@ -329,9 +330,14 @@ private:<br>
MachineBasicBlock *findShallowDominator(MachineBasicBlock *MBB,<br>
MachineBasicBlock *DefMBB);<br>
<br>
- /// hoistCopiesForSize - Hoist back-copies to the complement interval in a<br>
- /// way that minimizes code size. This implements the SM_Size spill mode.<br>
- void hoistCopiesForSize();<br>
+ /// Find all the back-copies dominated by others.<br>
+ void computeRedundantBackCopies(DenseSet<unsigned> &NotToHoistSet,<br>
+ SmallVectorImpl<VNInfo *> &BackCopies);<br>
+<br>
+ /// Hoist back-copies to the complement interval. It tries to hoist all<br>
+ /// the back-copies to one BB if it is beneficial, or else simply remove<br>
+ /// redundant back-copies dominated by others.<br>
+ void hoistCopies();<br>
<br>
/// transferValues - Transfer values to the new ranges.<br>
/// Return true if any ranges were skipped.<br>
<br>
Removed: llvm/trunk/test/CodeGen/AArch64/aarch64-deferred-spilling.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-deferred-spilling.ll?rev=265308&view=auto" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-deferred-spilling.ll?rev=265308&view=auto</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/AArch64/aarch64-deferred-spilling.ll (original)<br>
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-deferred-spilling.ll (removed)<br>
@@ -1,514 +0,0 @@<br>
-;RUN: llc < %s -mtriple=aarch64--linux-android -regalloc=greedy -enable-deferred-spilling=true -mcpu=cortex-a57 -disable-fp-elim | FileCheck %s --check-prefix=CHECK --check-prefix=DEFERRED<br>
-;RUN: llc < %s -mtriple=aarch64--linux-android -regalloc=greedy -enable-deferred-spilling=false -mcpu=cortex-a57 -disable-fp-elim | FileCheck %s --check-prefix=CHECK --check-prefix=REGULAR<br>
-<br>
-; Check that we do not end up with useless spill code.<br>
-;<br>
-; Move to the basic block we are interested in.<br>
-;<br>
-; CHECK: // %if.then.120<br>
-;<br>
-; REGULAR: str w21, [sp, #[[OFFSET:[0-9]+]]] // 4-byte Folded Spill<br>
-; Check that w21 wouldn't need to be spilled since it is never reused.<br>
-; REGULAR-NOT: {{[wx]}}21{{,?}}<br>
-;<br>
-; Check that w22 is used to carry a value through the call.<br>
-; DEFERRED-NOT: str {{[wx]}}22,<br>
-; DEFERRED: mov {{[wx]}}22,<br>
-; DEFERRED-NOT: str {{[wx]}}22,<br>
-;<br>
-; CHECK: bl fprintf<br>
-;<br>
-; DEFERRED-NOT: ldr {{[wx]}}22,<br>
-; DEFERRED: mov {{[wx][0-9]+}}, {{[wx]}}22<br>
-; DEFERRED-NOT: ldr {{[wx]}}22,<br>
-;<br>
-; REGULAR-NOT: {{[wx]}}21{{,?}}<br>
-; REGULAR: ldr w21, [sp, #[[OFFSET]]] // 4-byte Folded Reload<br>
-;<br>
-; End of the basic block we are interested in.<br>
-; CHECK: b<br>
-; CHECK: {{[^:]+}}: // %sw.bb.123<br>
-<br>
-%struct.__sFILE = type { i8*, i32, i32, i32, i32, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, i8*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }<br>
-%struct.__sbuf = type { i8*, i64 }<br>
-%struct.DState = type { %struct.bz_stream*, i32, i8, i32, i8, i32, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, [256 x i32], i32, [257 x i32], [257 x i32], i32*, i16*, i8*, i32, i32, i32, i32, i32, [256 x i8], [16 x i8], [256 x i8], [4096 x i8], [16 x i32], [18002 x i8], [18002 x i8], [6 x [258 x i8]], [6 x [258 x i32]], [6 x [258 x i32]], [6 x [258 x i32]], [6 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32*, i32*, i32* }<br>
-%struct.bz_stream = type { i8*, i32, i32, i32, i8*, i32, i32, i32, i8*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i8* }<br>
-<br>
-@__sF = external global [0 x %struct.__sFILE], align 8<br>
-@.str = private unnamed_addr constant [20 x i8] c"\0A [%d: stuff+mf \00", align 1<br>
-<br>
-declare i32 @fprintf(%struct.__sFILE* nocapture, i8* nocapture readonly, ...)<br>
-<br>
-declare void @bar(i32)<br>
-<br>
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)<br>
-<br>
-define i32 @foo(%struct.DState* %s) {<br>
-entry:<br>
- %state = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 1<br>
- %tmp = load i32, i32* %state, align 4<br>
- %cmp = icmp eq i32 %tmp, 10<br>
- %save_i = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 40<br>
- br i1 %cmp, label %if.end.thread, label %if.end<br>
-<br>
-if.end.thread: ; preds = %entry<br>
- %save_j = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 41<br>
- %save_t = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 42<br>
- %save_alphaSize = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 43<br>
- %save_nGroups = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 44<br>
- %save_nSelectors = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 45<br>
- %save_EOB = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 46<br>
- %save_groupNo = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 47<br>
- %save_groupPos = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 48<br>
- %save_nextSym = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 49<br>
- %save_nblockMAX = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 50<br>
- %save_nblock = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 51<br>
- %save_es = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 52<br>
- %save_N = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 53<br>
- %save_curr = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 54<br>
- %save_zt = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 55<br>
- %save_zn = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 56<br>
- %save_zvec = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 57<br>
- %save_zj = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 58<br>
- %tmp1 = bitcast i32* %save_i to i8*<br>
- call void @llvm.memset.p0i8.i64(i8* %tmp1, i8 0, i64 108, i32 4, i1 false)<br>
- br label %sw.default<br>
-<br>
-if.end: ; preds = %entry<br>
- %.pre = load i32, i32* %save_i, align 4<br>
- %save_j3.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 41<br>
- %.pre406 = load i32, i32* %save_j3.phi.trans.insert, align 4<br>
- %save_t4.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 42<br>
- %.pre407 = load i32, i32* %save_t4.phi.trans.insert, align 4<br>
- %save_alphaSize5.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 43<br>
- %.pre408 = load i32, i32* %save_alphaSize5.phi.trans.insert, align 4<br>
- %save_nGroups6.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 44<br>
- %.pre409 = load i32, i32* %save_nGroups6.phi.trans.insert, align 4<br>
- %save_nSelectors7.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 45<br>
- %.pre410 = load i32, i32* %save_nSelectors7.phi.trans.insert, align 4<br>
- %save_EOB8.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 46<br>
- %.pre411 = load i32, i32* %save_EOB8.phi.trans.insert, align 4<br>
- %save_groupNo9.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 47<br>
- %.pre412 = load i32, i32* %save_groupNo9.phi.trans.insert, align 4<br>
- %save_groupPos10.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 48<br>
- %.pre413 = load i32, i32* %save_groupPos10.phi.trans.insert, align 4<br>
- %save_nextSym11.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 49<br>
- %.pre414 = load i32, i32* %save_nextSym11.phi.trans.insert, align 4<br>
- %save_nblockMAX12.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 50<br>
- %.pre415 = load i32, i32* %save_nblockMAX12.phi.trans.insert, align 4<br>
- %save_nblock13.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 51<br>
- %.pre416 = load i32, i32* %save_nblock13.phi.trans.insert, align 4<br>
- %save_es14.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 52<br>
- %.pre417 = load i32, i32* %save_es14.phi.trans.insert, align 4<br>
- %save_N15.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 53<br>
- %.pre418 = load i32, i32* %save_N15.phi.trans.insert, align 4<br>
- %save_curr16.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 54<br>
- %.pre419 = load i32, i32* %save_curr16.phi.trans.insert, align 4<br>
- %save_zt17.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 55<br>
- %.pre420 = load i32, i32* %save_zt17.phi.trans.insert, align 4<br>
- %save_zn18.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 56<br>
- %.pre421 = load i32, i32* %save_zn18.phi.trans.insert, align 4<br>
- %save_zvec19.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 57<br>
- %.pre422 = load i32, i32* %save_zvec19.phi.trans.insert, align 4<br>
- %save_zj20.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 58<br>
- %.pre423 = load i32, i32* %save_zj20.phi.trans.insert, align 4<br>
- switch i32 %tmp, label %sw.default [<br>
- i32 13, label %sw.bb<br>
- i32 14, label %if.end.sw.bb.65_crit_edge<br>
- i32 25, label %if.end.sw.bb.123_crit_edge<br>
- ]<br>
-<br>
-if.end.sw.bb.123_crit_edge: ; preds = %if.end<br>
- %.pre433 = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 8<br>
- br label %sw.bb.123<br>
-<br>
-if.end.sw.bb.65_crit_edge: ; preds = %if.end<br>
- %bsLive69.phi.trans.insert = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 8<br>
- %.pre426 = load i32, i32* %bsLive69.phi.trans.insert, align 4<br>
- br label %sw.bb.65<br>
-<br>
-<a href="http://sw.bb" rel="noreferrer" target="_blank">sw.bb</a>: ; preds = %if.end<br>
- %sunkaddr = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr485 = add i64 %sunkaddr, 8<br>
- %sunkaddr486 = inttoptr i64 %sunkaddr485 to i32*<br>
- store i32 13, i32* %sunkaddr486, align 4<br>
- %bsLive = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 8<br>
- %tmp2 = load i32, i32* %bsLive, align 4<br>
- %cmp28.400 = icmp sgt i32 %tmp2, 7<br>
- br i1 %cmp28.400, label %sw.bb.if.then.29_crit_edge, label %if.end.33.lr.ph<br>
-<br>
-sw.bb.if.then.29_crit_edge: ; preds = %sw.bb<br>
- %sunkaddr487 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr488 = add i64 %sunkaddr487, 32<br>
- %sunkaddr489 = inttoptr i64 %sunkaddr488 to i32*<br>
- %.pre425 = load i32, i32* %sunkaddr489, align 4<br>
- br label %if.then.29<br>
-<br>
-<a href="http://if.end.33.lr.ph" rel="noreferrer" target="_blank">if.end.33.lr.ph</a>: ; preds = %<a href="http://sw.bb" rel="noreferrer" target="_blank">sw.bb</a><br>
- %tmp3 = bitcast %struct.DState* %s to %struct.bz_stream**<br>
- %.pre424 = load %struct.bz_stream*, %struct.bz_stream** %tmp3, align 8<br>
- %avail_in.phi.trans.insert = getelementptr inbounds %struct.bz_stream, %struct.bz_stream* %.pre424, i64 0, i32 1<br>
- %.pre430 = load i32, i32* %avail_in.phi.trans.insert, align 4<br>
- %tmp4 = add i32 %.pre430, -1<br>
- br label %if.end.33<br>
-<br>
-if.then.29: ; preds = %while.body.backedge, %sw.bb.if.then.29_crit_edge<br>
- %tmp5 = phi i32 [ %.pre425, %sw.bb.if.then.29_crit_edge ], [ %or, %while.body.backedge ]<br>
- %.lcssa393 = phi i32 [ %tmp2, %sw.bb.if.then.29_crit_edge ], [ %add, %while.body.backedge ]<br>
- %sub = add nsw i32 %.lcssa393, -8<br>
- %shr = lshr i32 %tmp5, %sub<br>
- %and = and i32 %shr, 255<br>
- %sunkaddr491 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr492 = add i64 %sunkaddr491, 36<br>
- %sunkaddr493 = inttoptr i64 %sunkaddr492 to i32*<br>
- store i32 %sub, i32* %sunkaddr493, align 4<br>
- %blockSize100k = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 9<br>
- store i32 %and, i32* %blockSize100k, align 4<br>
- %and.off = add nsw i32 %and, -49<br>
- %tmp6 = icmp ugt i32 %and.off, 8<br>
- br i1 %tmp6, label %save_state_and_return, label %if.end.62<br>
-<br>
-if.end.33: ; preds = %while.body.backedge, %if.end.33.lr.ph<br>
- %lsr.iv482 = phi i32 [ %tmp4, %if.end.33.lr.ph ], [ %lsr.iv.next483, %while.body.backedge ]<br>
- %tmp7 = phi i32 [ %tmp2, %if.end.33.lr.ph ], [ %add, %while.body.backedge ]<br>
- %cmp35 = icmp eq i32 %lsr.iv482, -1<br>
- br i1 %cmp35, label %save_state_and_return, label %if.end.37<br>
-<br>
-if.end.37: ; preds = %if.end.33<br>
- %tmp8 = bitcast %struct.bz_stream* %.pre424 to i8**<br>
- %sunkaddr494 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr495 = add i64 %sunkaddr494, 32<br>
- %sunkaddr496 = inttoptr i64 %sunkaddr495 to i32*<br>
- %tmp9 = load i32, i32* %sunkaddr496, align 4<br>
- %shl = shl i32 %tmp9, 8<br>
- %tmp10 = load i8*, i8** %tmp8, align 8<br>
- %tmp11 = load i8, i8* %tmp10, align 1<br>
- %conv = zext i8 %tmp11 to i32<br>
- %or = or i32 %conv, %shl<br>
- store i32 %or, i32* %sunkaddr496, align 4<br>
- %add = add nsw i32 %tmp7, 8<br>
- %sunkaddr497 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr498 = add i64 %sunkaddr497, 36<br>
- %sunkaddr499 = inttoptr i64 %sunkaddr498 to i32*<br>
- store i32 %add, i32* %sunkaddr499, align 4<br>
- %incdec.ptr = getelementptr inbounds i8, i8* %tmp10, i64 1<br>
- store i8* %incdec.ptr, i8** %tmp8, align 8<br>
- %sunkaddr500 = ptrtoint %struct.bz_stream* %.pre424 to i64<br>
- %sunkaddr501 = add i64 %sunkaddr500, 8<br>
- %sunkaddr502 = inttoptr i64 %sunkaddr501 to i32*<br>
- store i32 %lsr.iv482, i32* %sunkaddr502, align 4<br>
- %sunkaddr503 = ptrtoint %struct.bz_stream* %.pre424 to i64<br>
- %sunkaddr504 = add i64 %sunkaddr503, 12<br>
- %sunkaddr505 = inttoptr i64 %sunkaddr504 to i32*<br>
- %tmp12 = load i32, i32* %sunkaddr505, align 4<br>
- %inc = add i32 %tmp12, 1<br>
- store i32 %inc, i32* %sunkaddr505, align 4<br>
- %cmp49 = icmp eq i32 %inc, 0<br>
- br i1 %cmp49, label %if.then.51, label %while.body.backedge<br>
-<br>
-if.then.51: ; preds = %if.end.37<br>
- %sunkaddr506 = ptrtoint %struct.bz_stream* %.pre424 to i64<br>
- %sunkaddr507 = add i64 %sunkaddr506, 16<br>
- %sunkaddr508 = inttoptr i64 %sunkaddr507 to i32*<br>
- %tmp13 = load i32, i32* %sunkaddr508, align 4<br>
- %inc53 = add i32 %tmp13, 1<br>
- store i32 %inc53, i32* %sunkaddr508, align 4<br>
- br label %while.body.backedge<br>
-<br>
-while.body.backedge: ; preds = %if.then.51, %if.end.37<br>
- %lsr.iv.next483 = add i32 %lsr.iv482, -1<br>
- %cmp28 = icmp sgt i32 %add, 7<br>
- br i1 %cmp28, label %if.then.29, label %if.end.33<br>
-<br>
-if.end.62: ; preds = %if.then.29<br>
- %sub64 = add nsw i32 %and, -48<br>
- %sunkaddr509 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr510 = add i64 %sunkaddr509, 40<br>
- %sunkaddr511 = inttoptr i64 %sunkaddr510 to i32*<br>
- store i32 %sub64, i32* %sunkaddr511, align 4<br>
- br label %sw.bb.65<br>
-<br>
-sw.bb.65: ; preds = %if.end.62, %if.end.sw.bb.65_crit_edge<br>
- %bsLive69.pre-phi = phi i32* [ %bsLive69.phi.trans.insert, %if.end.sw.bb.65_crit_edge ], [ %bsLive, %if.end.62 ]<br>
- %tmp14 = phi i32 [ %.pre426, %if.end.sw.bb.65_crit_edge ], [ %sub, %if.end.62 ]<br>
- %sunkaddr512 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr513 = add i64 %sunkaddr512, 8<br>
- %sunkaddr514 = inttoptr i64 %sunkaddr513 to i32*<br>
- store i32 14, i32* %sunkaddr514, align 4<br>
- %cmp70.397 = icmp sgt i32 %tmp14, 7<br>
- br i1 %cmp70.397, label %if.then.72, label %if.end.82.lr.ph<br>
-<br>
-<a href="http://if.end.82.lr.ph" rel="noreferrer" target="_blank">if.end.82.lr.ph</a>: ; preds = %sw.bb.65<br>
- %tmp15 = bitcast %struct.DState* %s to %struct.bz_stream**<br>
- %.pre427 = load %struct.bz_stream*, %struct.bz_stream** %tmp15, align 8<br>
- %avail_in84.phi.trans.insert = getelementptr inbounds %struct.bz_stream, %struct.bz_stream* %.pre427, i64 0, i32 1<br>
- %.pre431 = load i32, i32* %avail_in84.phi.trans.insert, align 4<br>
- %tmp16 = add i32 %.pre431, -1<br>
- br label %if.end.82<br>
-<br>
-if.then.72: ; preds = %while.body.68.backedge, %sw.bb.65<br>
- %.lcssa390 = phi i32 [ %tmp14, %sw.bb.65 ], [ %add97, %while.body.68.backedge ]<br>
- %sub76 = add nsw i32 %.lcssa390, -8<br>
- %sunkaddr516 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr517 = add i64 %sunkaddr516, 36<br>
- %sunkaddr518 = inttoptr i64 %sunkaddr517 to i32*<br>
- store i32 %sub76, i32* %sunkaddr518, align 4<br>
- %currBlockNo = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 11<br>
- %tmp17 = load i32, i32* %currBlockNo, align 4<br>
- %inc117 = add nsw i32 %tmp17, 1<br>
- store i32 %inc117, i32* %currBlockNo, align 4<br>
- %verbosity = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 12<br>
- %tmp18 = load i32, i32* %verbosity, align 4<br>
- %cmp118 = icmp sgt i32 %tmp18, 1<br>
- br i1 %cmp118, label %if.then.120, label %sw.bb.123, !prof !0<br>
-<br>
-if.end.82: ; preds = %while.body.68.backedge, %if.end.82.lr.ph<br>
- %lsr.iv480 = phi i32 [ %tmp16, %if.end.82.lr.ph ], [ %lsr.iv.next481, %while.body.68.backedge ]<br>
- %tmp19 = phi i32 [ %tmp14, %if.end.82.lr.ph ], [ %add97, %while.body.68.backedge ]<br>
- %cmp85 = icmp eq i32 %lsr.iv480, -1<br>
- br i1 %cmp85, label %save_state_and_return, label %if.end.88<br>
-<br>
-if.end.88: ; preds = %if.end.82<br>
- %tmp20 = bitcast %struct.bz_stream* %.pre427 to i8**<br>
- %sunkaddr519 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr520 = add i64 %sunkaddr519, 32<br>
- %sunkaddr521 = inttoptr i64 %sunkaddr520 to i32*<br>
- %tmp21 = load i32, i32* %sunkaddr521, align 4<br>
- %shl90 = shl i32 %tmp21, 8<br>
- %tmp22 = load i8*, i8** %tmp20, align 8<br>
- %tmp23 = load i8, i8* %tmp22, align 1<br>
- %conv93 = zext i8 %tmp23 to i32<br>
- %or94 = or i32 %conv93, %shl90<br>
- store i32 %or94, i32* %sunkaddr521, align 4<br>
- %add97 = add nsw i32 %tmp19, 8<br>
- %sunkaddr522 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr523 = add i64 %sunkaddr522, 36<br>
- %sunkaddr524 = inttoptr i64 %sunkaddr523 to i32*<br>
- store i32 %add97, i32* %sunkaddr524, align 4<br>
- %incdec.ptr100 = getelementptr inbounds i8, i8* %tmp22, i64 1<br>
- store i8* %incdec.ptr100, i8** %tmp20, align 8<br>
- %sunkaddr525 = ptrtoint %struct.bz_stream* %.pre427 to i64<br>
- %sunkaddr526 = add i64 %sunkaddr525, 8<br>
- %sunkaddr527 = inttoptr i64 %sunkaddr526 to i32*<br>
- store i32 %lsr.iv480, i32* %sunkaddr527, align 4<br>
- %sunkaddr528 = ptrtoint %struct.bz_stream* %.pre427 to i64<br>
- %sunkaddr529 = add i64 %sunkaddr528, 12<br>
- %sunkaddr530 = inttoptr i64 %sunkaddr529 to i32*<br>
- %tmp24 = load i32, i32* %sunkaddr530, align 4<br>
- %inc106 = add i32 %tmp24, 1<br>
- store i32 %inc106, i32* %sunkaddr530, align 4<br>
- %cmp109 = icmp eq i32 %inc106, 0<br>
- br i1 %cmp109, label %if.then.111, label %while.body.68.backedge<br>
-<br>
-if.then.111: ; preds = %if.end.88<br>
- %sunkaddr531 = ptrtoint %struct.bz_stream* %.pre427 to i64<br>
- %sunkaddr532 = add i64 %sunkaddr531, 16<br>
- %sunkaddr533 = inttoptr i64 %sunkaddr532 to i32*<br>
- %tmp25 = load i32, i32* %sunkaddr533, align 4<br>
- %inc114 = add i32 %tmp25, 1<br>
- store i32 %inc114, i32* %sunkaddr533, align 4<br>
- br label %while.body.68.backedge<br>
-<br>
-while.body.68.backedge: ; preds = %if.then.111, %if.end.88<br>
- %lsr.iv.next481 = add i32 %lsr.iv480, -1<br>
- %cmp70 = icmp sgt i32 %add97, 7<br>
- br i1 %cmp70, label %if.then.72, label %if.end.82<br>
-<br>
-if.then.120: ; preds = %if.then.72<br>
- %call = tail call i32 (%struct.__sFILE*, i8*, ...) @fprintf(%struct.__sFILE* getelementptr inbounds ([0 x %struct.__sFILE], [0 x %struct.__sFILE]* @__sF, i64 0, i64 2), i8* getelementptr inbounds ([20 x i8], [20 x i8]* @.str, i64 0, i64 0), i32 %inc117)<br>
- br label %sw.bb.123<br>
-<br>
-sw.bb.123: ; preds = %if.then.120, %if.then.72, %if.end.sw.bb.123_crit_edge<br>
- %bsLive127.pre-phi = phi i32* [ %.pre433, %if.end.sw.bb.123_crit_edge ], [ %bsLive69.pre-phi, %if.then.72 ], [ %bsLive69.pre-phi, %if.then.120 ]<br>
- %sunkaddr534 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr535 = add i64 %sunkaddr534, 8<br>
- %sunkaddr536 = inttoptr i64 %sunkaddr535 to i32*<br>
- store i32 25, i32* %sunkaddr536, align 4<br>
- %tmp26 = load i32, i32* %bsLive127.pre-phi, align 4<br>
- %cmp128.395 = icmp sgt i32 %tmp26, 7<br>
- br i1 %cmp128.395, label %sw.bb.123.if.then.130_crit_edge, label %if.end.140.lr.ph<br>
-<br>
-sw.bb.123.if.then.130_crit_edge: ; preds = %sw.bb.123<br>
- %sunkaddr537 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr538 = add i64 %sunkaddr537, 32<br>
- %sunkaddr539 = inttoptr i64 %sunkaddr538 to i32*<br>
- %.pre429 = load i32, i32* %sunkaddr539, align 4<br>
- br label %if.then.130<br>
-<br>
-<a href="http://if.end.140.lr.ph" rel="noreferrer" target="_blank">if.end.140.lr.ph</a>: ; preds = %sw.bb.123<br>
- %tmp27 = bitcast %struct.DState* %s to %struct.bz_stream**<br>
- %.pre428 = load %struct.bz_stream*, %struct.bz_stream** %tmp27, align 8<br>
- %avail_in142.phi.trans.insert = getelementptr inbounds %struct.bz_stream, %struct.bz_stream* %.pre428, i64 0, i32 1<br>
- %.pre432 = load i32, i32* %avail_in142.phi.trans.insert, align 4<br>
- %tmp28 = add i32 %.pre432, -1<br>
- br label %if.end.140<br>
-<br>
-if.then.130: ; preds = %while.body.126.backedge, %sw.bb.123.if.then.130_crit_edge<br>
- %tmp29 = phi i32 [ %.pre429, %sw.bb.123.if.then.130_crit_edge ], [ %or152, %while.body.126.backedge ]<br>
- %.lcssa = phi i32 [ %tmp26, %sw.bb.123.if.then.130_crit_edge ], [ %add155, %while.body.126.backedge ]<br>
- %sub134 = add nsw i32 %.lcssa, -8<br>
- %shr135 = lshr i32 %tmp29, %sub134<br>
- store i32 %sub134, i32* %bsLive127.pre-phi, align 4<br>
- %origPtr = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 13<br>
- %tmp30 = load i32, i32* %origPtr, align 4<br>
- %shl175 = shl i32 %tmp30, 8<br>
- %conv176 = and i32 %shr135, 255<br>
- %or177 = or i32 %shl175, %conv176<br>
- store i32 %or177, i32* %origPtr, align 4<br>
- %nInUse = getelementptr inbounds %struct.DState, %struct.DState* %s, i64 0, i32 27<br>
- %tmp31 = load i32, i32* %nInUse, align 4<br>
- %add179 = add nsw i32 %tmp31, 2<br>
- br label %save_state_and_return<br>
-<br>
-if.end.140: ; preds = %while.body.126.backedge, %if.end.140.lr.ph<br>
- %lsr.iv = phi i32 [ %tmp28, %if.end.140.lr.ph ], [ %lsr.iv.next, %while.body.126.backedge ]<br>
- %tmp32 = phi i32 [ %tmp26, %if.end.140.lr.ph ], [ %add155, %while.body.126.backedge ]<br>
- %cmp143 = icmp eq i32 %lsr.iv, -1<br>
- br i1 %cmp143, label %save_state_and_return, label %if.end.146<br>
-<br>
-if.end.146: ; preds = %if.end.140<br>
- %tmp33 = bitcast %struct.bz_stream* %.pre428 to i8**<br>
- %sunkaddr541 = ptrtoint %struct.DState* %s to i64<br>
- %sunkaddr542 = add i64 %sunkaddr541, 32<br>
- %sunkaddr543 = inttoptr i64 %sunkaddr542 to i32*<br>
- %tmp34 = load i32, i32* %sunkaddr543, align 4<br>
- %shl148 = shl i32 %tmp34, 8<br>
- %tmp35 = load i8*, i8** %tmp33, align 8<br>
- %tmp36 = load i8, i8* %tmp35, align 1<br>
- %conv151 = zext i8 %tmp36 to i32<br>
- %or152 = or i32 %conv151, %shl148<br>
- store i32 %or152, i32* %sunkaddr543, align 4<br>
- %add155 = add nsw i32 %tmp32, 8<br>
- store i32 %add155, i32* %bsLive127.pre-phi, align 4<br>
- %incdec.ptr158 = getelementptr inbounds i8, i8* %tmp35, i64 1<br>
- store i8* %incdec.ptr158, i8** %tmp33, align 8<br>
- %sunkaddr544 = ptrtoint %struct.bz_stream* %.pre428 to i64<br>
- %sunkaddr545 = add i64 %sunkaddr544, 8<br>
- %sunkaddr546 = inttoptr i64 %sunkaddr545 to i32*<br>
- store i32 %lsr.iv, i32* %sunkaddr546, align 4<br>
- %sunkaddr547 = ptrtoint %struct.bz_stream* %.pre428 to i64<br>
- %sunkaddr548 = add i64 %sunkaddr547, 12<br>
- %sunkaddr549 = inttoptr i64 %sunkaddr548 to i32*<br>
- %tmp37 = load i32, i32* %sunkaddr549, align 4<br>
- %inc164 = add i32 %tmp37, 1<br>
- store i32 %inc164, i32* %sunkaddr549, align 4<br>
- %cmp167 = icmp eq i32 %inc164, 0<br>
- br i1 %cmp167, label %if.then.169, label %while.body.126.backedge<br>
-<br>
-if.then.169: ; preds = %if.end.146<br>
- %sunkaddr550 = ptrtoint %struct.bz_stream* %.pre428 to i64<br>
- %sunkaddr551 = add i64 %sunkaddr550, 16<br>
- %sunkaddr552 = inttoptr i64 %sunkaddr551 to i32*<br>
- %tmp38 = load i32, i32* %sunkaddr552, align 4<br>
- %inc172 = add i32 %tmp38, 1<br>
- store i32 %inc172, i32* %sunkaddr552, align 4<br>
- br label %while.body.126.backedge<br>
-<br>
-while.body.126.backedge: ; preds = %if.then.169, %if.end.146<br>
- %lsr.iv.next = add i32 %lsr.iv, -1<br>
- %cmp128 = icmp sgt i32 %add155, 7<br>
- br i1 %cmp128, label %if.then.130, label %if.end.140<br>
-<br>
-sw.default: ; preds = %if.end, %if.end.thread<br>
- %tmp39 = phi i32 [ 0, %if.end.thread ], [ %.pre, %if.end ]<br>
- %tmp40 = phi i32 [ 0, %if.end.thread ], [ %.pre406, %if.end ]<br>
- %tmp41 = phi i32 [ ...<br><br>[Message clipped] </blockquote></div><br><br clear="all"><div><br></div>-- <br><div class="gmail_signature">Teresa Johnson | Software Engineer | <a href="mailto:tejohnson@google.com" target="_blank">tejohnson@google.com</a> | 408-460-2413</div>
</div>