[llvm-commits] [llvm] r41060 - in /llvm/trunk: include/llvm/CodeGen/LiveInterval.h include/llvm/CodeGen/LiveIntervalAnalysis.h lib/CodeGen/LiveIntervalAnalysis.cpp lib/CodeGen/RegAllocLinearScan.cpp lib/CodeGen/SimpleRegisterCoalescing.cpp lib/Co
Evan Cheng
evan.cheng at apple.com
Tue Aug 14 14:07:44 PDT 2007
Being fixed.
Evan
On Aug 14, 2007, at 11:23 AM, Lauro Ramos Venancio wrote:
> Hi Evan,
>
> This revision caused some regressions (see
> http://llvm.org/nightlytest/test.php?machine=142&night=3420). A
> testcase is attached.
>
>
> laurov@laurov-desktop:/test_1710/llvm-test/MultiSource/Benchmarks/MiBench/consumer-typeset$
> ~/llvm/llvm/build/Debug/bin/llc < bugpoint-reduced-simplified.bc
> llc: /home/laurov/llvm/llvm/lib/CodeGen/RegisterScavenging.cpp:142: void llvm::RegScavenger::forward(): Assertion `(isUnused(Reg) || isReserved(Reg)) && "Re-defining a live register!"' failed.
>
> Lauro
>
> 2007/8/13, Evan Cheng <evan.cheng at apple.com>:
>> Author: evancheng
>> Date: Mon Aug 13 18:45:17 2007
>> New Revision: 41060
>>
>> URL: http://llvm.org/viewvc/llvm-project?rev=41060&view=rev
>> Log:
>> Re-implement trivial rematerialization. This allows def MIs whose
>> live intervals are coalesced to be rematerialized.
>>
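For context on the log message: rematerialization means re-emitting a value's defining instruction right before a use instead of spilling the value to a stack slot and reloading it, which only pays off when the definition is cheap and has no side effects. The old code cached a single remat pointer per LiveInterval; this patch asks the question per value number (isReMaterializable below), so intervals that picked up extra definitions through coalescing can still participate. A minimal standalone C++ sketch of the basic test; the types and the "movri" opcode are invented for illustration and are not the LLVM TargetInstrInfo hook:

// Illustrative sketch only, not LLVM code: a definition qualifies for
// rematerialization when re-executing it at any point yields the same value
// and touches nothing else (no stores, calls, or volatile accesses).
#include <iostream>
#include <string>

struct ToyInstr {
  std::string opcode;   // e.g. "movri" = load an immediate into a register
  bool hasSideEffects;  // stores, calls, volatile accesses...
};

static bool isTriviallyRematerializable(const ToyInstr &def) {
  // A constant-materializing instruction with no side effects can simply be
  // re-emitted at each use instead of being spilled and reloaded.
  return !def.hasSideEffects && def.opcode == "movri";
}

int main() {
  ToyInstr loadImm{"movri", false};
  ToyInstr call{"call", true};
  std::cout << isTriviallyRematerializable(loadImm) << "\n"; // 1: re-emit at uses
  std::cout << isTriviallyRematerializable(call) << "\n";    // 0: needs a stack slot
  return 0;
}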
>> Modified:
>> llvm/trunk/include/llvm/CodeGen/LiveInterval.h
>> llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h
>> llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
>> llvm/trunk/lib/CodeGen/RegAllocLinearScan.cpp
>> llvm/trunk/lib/CodeGen/SimpleRegisterCoalescing.cpp
>> llvm/trunk/lib/CodeGen/VirtRegMap.cpp
>> llvm/trunk/lib/CodeGen/VirtRegMap.h
>>
>> Modified: llvm/trunk/include/llvm/CodeGen/LiveInterval.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/
>> CodeGen/LiveInterval.h?rev=41060&r1=41059&r2=41060&view=diff
>>
>> =====================================================================
>> =========
>> --- llvm/trunk/include/llvm/CodeGen/LiveInterval.h (original)
>> +++ llvm/trunk/include/llvm/CodeGen/LiveInterval.h Mon Aug 13
>> 18:45:17 2007
>> @@ -83,7 +83,6 @@
>> unsigned reg; // the register of this interval
>> unsigned preference; // preferred register to allocate for
>> this interval
>> float weight; // weight of this interval
>> - MachineInstr* remat; // definition if the definition
>> rematerializable
>> Ranges ranges; // the ranges in which this register is
>> live
>>
>> /// ValueNumberInfo - If the value number definition is
>> undefined (e.g. phi
>> @@ -101,7 +100,7 @@
>> public:
>>
>> LiveInterval(unsigned Reg, float Weight)
>> - : reg(Reg), preference(0), weight(Weight), remat(NULL) {
>> + : reg(Reg), preference(0), weight(Weight) {
>> }
>>
>> typedef Ranges::iterator iterator;
>> @@ -128,7 +127,6 @@
>> void swap(LiveInterval& other) {
>> std::swap(reg, other.reg);
>> std::swap(weight, other.weight);
>> - std::swap(remat, other.remat);
>> std::swap(ranges, other.ranges);
>> std::swap(ValueNumberInfo, other.ValueNumberInfo);
>> }
>>
>> Modified: llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/
>> CodeGen/LiveIntervalAnalysis.h?rev=41060&r1=41059&r2=41060&view=diff
>>
>> =====================================================================
>> =========
>> --- llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h (original)
>> +++ llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h Mon Aug
>> 13 18:45:17 2007
>> @@ -25,6 +25,8 @@
>> #include "llvm/ADT/BitVector.h"
>> #include "llvm/ADT/DenseMap.h"
>> #include "llvm/ADT/IndexedMap.h"
>> +#include "llvm/ADT/SmallPtrSet.h"
>> +#include "llvm/ADT/SmallVector.h"
>>
>> namespace llvm {
>>
>> @@ -41,9 +43,9 @@
>> const TargetInstrInfo* tii_;
>> LiveVariables* lv_;
>>
>> - /// MBB2IdxMap - The index of the first instruction in the
>> specified basic
>> - /// block.
>> - std::vector<unsigned> MBB2IdxMap;
>> + /// MBB2IdxMap - The indexes of the first and last instructions in
>> + /// the specified basic block.
>> + std::vector<std::pair<unsigned, unsigned> > MBB2IdxMap;
>>
>> typedef std::map<MachineInstr*, unsigned> Mi2IndexMap;
>> Mi2IndexMap mi2iMap_;
>> @@ -56,6 +58,8 @@
>>
>> BitVector allocatableRegs_;
>>
>> + std::vector<MachineInstr*> ClonedMIs;
>> +
>> public:
>> static char ID; // Pass identification, replacement for typeid
>> LiveIntervals() : MachineFunctionPass((intptr_t)&ID) {}
>> @@ -118,10 +122,19 @@
>> unsigned getMBBStartIdx(MachineBasicBlock *MBB) const {
>> return getMBBStartIdx(MBB->getNumber());
>> }
>> -
>> unsigned getMBBStartIdx(unsigned MBBNo) const {
>> assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
>> - return MBB2IdxMap[MBBNo];
>> + return MBB2IdxMap[MBBNo].first;
>> + }
>> +
>> + /// getMBBEndIdx - Return the store index of the last instruction
>> + /// in the specified MachineBasicBlock.
>> + unsigned getMBBEndIdx(MachineBasicBlock *MBB) const {
>> + return getMBBEndIdx(MBB->getNumber());
>> + }
>> + unsigned getMBBEndIdx(unsigned MBBNo) const {
>> + assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
>> + return MBB2IdxMap[MBBNo].second;
>> }
>>
>> /// getInstructionIndex - returns the base index of instr
>> @@ -155,8 +168,7 @@
>> const
>> std::vector<LiveRange> &LRs);
>>
>> std::vector<LiveInterval*> addIntervalsForSpills(const
>> LiveInterval& i,
>> - VirtRegMap&
>> vrm,
>> - int slot);
>> + VirtRegMap& vrm,
>> unsigned reg);
>>
>> // Interval removal
>>
>> @@ -225,6 +237,17 @@
>> unsigned MIIdx,
>> LiveInterval &interval, bool
>> isAlias = false);
>>
>> + /// isReMaterializable - Returns true if the definition MI of the
>> + /// specified val# of the specified interval is re-materializable.
>> + bool isReMaterializable(const LiveInterval &li, unsigned ValNum,
>> + MachineInstr *MI);
>> +
>> + /// tryFoldMemoryOperand - Attempts to fold a spill / restore from slot
>> + /// to reg into the ith operand of the specified MI. If successful, MI
>> + /// is updated with the newly created MI and true is returned.
>> + bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
>> unsigned index,
>> + unsigned i, int slot, unsigned reg);
>> +
>> static LiveInterval createInterval(unsigned Reg);
>>
>> void printRegName(unsigned reg) const;
>>
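The MBB2IdxMap change above widens the per-block record from a single start index to a (start, end) pair so that getMBBEndIdx can be answered directly. A small standalone model of that bookkeeping follows; the class and the index values are invented for illustration, only the pair-per-block idea mirrors the patch.

#include <cassert>
#include <utility>
#include <vector>

// Toy version of the widened map: block number -> (first index, last index).
class BlockIndexMap {
  std::vector<std::pair<unsigned, unsigned> > Map;
public:
  explicit BlockIndexMap(unsigned NumBlocks)
      : Map(NumBlocks, std::make_pair(~0u, ~0u)) {} // sentinel: not numbered yet
  void set(unsigned Block, unsigned Start, unsigned End) {
    Map[Block] = std::make_pair(Start, End);
  }
  unsigned startIdx(unsigned Block) const {
    assert(Block < Map.size() && "Invalid block number!");
    return Map[Block].first;
  }
  unsigned endIdx(unsigned Block) const {
    assert(Block < Map.size() && "Invalid block number!");
    return Map[Block].second;
  }
};

int main() {
  // Pretend block 0 holds instruction indices 0..12 and block 1 holds 16..28.
  BlockIndexMap M(2);
  M.set(0, 0, 12);
  M.set(1, 16, 28);
  assert(M.startIdx(1) == 16 && M.endIdx(0) == 12);
  return 0;
}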
>> Modified: llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/
>> LiveIntervalAnalysis.cpp?rev=41060&r1=41059&r2=41060&view=diff
>>
>> =====================================================================
>> =========
>> --- llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
>> +++ llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp Mon Aug 13
>> 18:45:17 2007
>> @@ -30,7 +30,6 @@
>> #include "llvm/Target/TargetMachine.h"
>> #include "llvm/Support/CommandLine.h"
>> #include "llvm/Support/Debug.h"
>> -#include "llvm/ADT/SmallSet.h"
>> #include "llvm/ADT/Statistic.h"
>> #include "llvm/ADT/STLExtras.h"
>> #include <algorithm>
>> @@ -60,6 +59,8 @@
>> mi2iMap_.clear();
>> i2miMap_.clear();
>> r2iMap_.clear();
>> + for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
>> + delete ClonedMIs[i];
>> }
>>
>> /// runOnMachineFunction - Register allocate the whole function
>> @@ -74,13 +75,12 @@
>>
>> // Number MachineInstrs and MachineBasicBlocks.
>> // Initialize MBB indexes to a sentinal.
>> - MBB2IdxMap.resize(mf_->getNumBlockIDs(), ~0U);
>> + MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
>>
>> unsigned MIIndex = 0;
>> for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
>> MBB != E; ++MBB) {
>> - // Set the MBB2IdxMap entry for this MBB.
>> - MBB2IdxMap[MBB->getNumber()] = MIIndex;
>> + unsigned StartIdx = MIIndex;
>>
>> for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB-
>> >end();
>> I != E; ++I) {
>> @@ -89,6 +89,9 @@
>> i2miMap_.push_back(I);
>> MIIndex += InstrSlots::NUM;
>> }
>> +
>> + // Set the MBB2IdxMap entry for this MBB.
>> + MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx,
>> MIIndex - 1);
>> }
>>
>> computeIntervals();
>> @@ -175,8 +178,76 @@
>> return NewLI;
>> }
>>
>> +/// isReDefinedByTwoAddr - Returns true if the Reg re-definition
>> is due to
>> +/// two addr elimination.
>> +static bool isReDefinedByTwoAddr(MachineInstr *MI, unsigned Reg,
>> + const TargetInstrInfo *TII) {
>> + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
>> + MachineOperand &MO1 = MI->getOperand(i);
>> + if (MO1.isRegister() && MO1.isDef() && MO1.getReg() == Reg) {
>> + for (unsigned j = i+1; j < e; ++j) {
>> + MachineOperand &MO2 = MI->getOperand(j);
>> + if (MO2.isRegister() && MO2.isUse() && MO2.getReg() ==
>> Reg &&
>> + MI->getInstrDescriptor()->
>> + getOperandConstraint(j, TOI::TIED_TO) == (int)i)
>> + return true;
>> + }
>> + }
>> + }
>> + return false;
>> +}
>> +
>> +/// isReMaterializable - Returns true if the definition MI of the
>> specified
>> +/// val# of the specified interval is re-materializable.
>> +bool LiveIntervals::isReMaterializable(const LiveInterval &li,
>> unsigned ValNum,
>> + MachineInstr *MI) {
>> + if (tii_->isTriviallyReMaterializable(MI))
>> + return true;
>> +
>> + int FrameIdx = 0;
>> + if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
>> + !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
>> + return false;
>> +
>> + // This is a load from fixed stack slot. It can be
>> rematerialized unless it's
>> + // re-defined by a two-address instruction.
>> + for (unsigned i = 0, e = li.getNumValNums(); i != e; ++i) {
>> + if (i == ValNum)
>> + continue;
>> + unsigned DefIdx = li.getDefForValNum(i);
>> + if (DefIdx == ~1U)
>> + continue; // Dead val#.
>> + MachineInstr *DefMI = (DefIdx == ~0u)
>> + ? NULL : getInstructionFromIndex(DefIdx);
>> + if (DefMI && isReDefinedByTwoAddr(DefMI, li.reg, tii_))
>> + return false;
>> + }
>> + return true;
>> +}
>> +
>> +bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
>> VirtRegMap &vrm,
>> + unsigned index, unsigned i,
>> + int slot, unsigned reg) {
>> + MachineInstr *fmi = mri_->foldMemoryOperand(MI, i, slot);
>> + if (fmi) {
>> + // Attempt to fold the memory reference into the instruction. If
>> + // we can do this, we don't need to insert spill code.
>> + if (lv_)
>> + lv_->instructionChanged(MI, fmi);
>> + MachineBasicBlock &MBB = *MI->getParent();
>> + vrm.virtFolded(reg, MI, i, fmi);
>> + mi2iMap_.erase(MI);
>> + i2miMap_[index/InstrSlots::NUM] = fmi;
>> + mi2iMap_[fmi] = index;
>> + MI = MBB.insert(MBB.erase(MI), fmi);
>> + ++numFolded;
>> + return true;
>> + }
>> + return false;
>> +}
>> +
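tryFoldMemoryOperand above factors out bookkeeping that used to sit inline in addIntervalsForSpills: when an instruction is replaced by a folded form that reads the stack slot directly, the instruction-to-index and index-to-instruction maps have to be rewired to the new instruction. A toy sketch of that rewiring, using strings in place of MachineInstr pointers (everything here is invented for illustration):

#include <cassert>
#include <map>
#include <string>

int main() {
  // Two-way bookkeeping between instructions and their numbering, standing in
  // for mi2iMap_ and i2miMap_ in the patch.
  std::map<std::string, unsigned> InstrToIndex;
  std::map<unsigned, std::string> IndexToInstr;

  InstrToIndex["add r1, r2"] = 4;
  IndexToInstr[4] = "add r1, r2";

  // Fold the reload of r2 from stack slot #3 directly into the add, so no
  // separate reload instruction is needed; the maps must follow the swap.
  const std::string Old = "add r1, r2";
  const std::string Folded = "add r1, [ss#3]";
  unsigned Idx = InstrToIndex[Old];
  InstrToIndex.erase(Old);
  InstrToIndex[Folded] = Idx;
  IndexToInstr[Idx] = Folded;

  assert(InstrToIndex.count(Old) == 0);
  assert(IndexToInstr[4] == Folded);
  return 0;
}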
>> std::vector<LiveInterval*> LiveIntervals::
>> -addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm,
>> int slot) {
>> +addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm,
>> unsigned reg) {
>> // since this is called after the analysis is done we don't
>> know if
>> // LiveVariables is available
>> lv_ = getAnalysisToUpdate<LiveVariables>();
>> @@ -192,10 +263,72 @@
>>
>> const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass
>> (li.reg);
>>
>> + unsigned NumValNums = li.getNumValNums();
>> + SmallVector<MachineInstr*, 4> ReMatDefs;
>> + ReMatDefs.resize(NumValNums, NULL);
>> + SmallVector<MachineInstr*, 4> ReMatOrigDefs;
>> + ReMatOrigDefs.resize(NumValNums, NULL);
>> + SmallVector<int, 4> ReMatIds;
>> + ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
>> + BitVector ReMatDelete(NumValNums);
>> + unsigned slot = VirtRegMap::MAX_STACK_SLOT;
>> +
>> + bool NeedStackSlot = false;
>> + for (unsigned i = 0; i != NumValNums; ++i) {
>> + unsigned DefIdx = li.getDefForValNum(i);
>> + if (DefIdx == ~1U)
>> + continue; // Dead val#.
>> + // Is the def for the val# rematerializable?
>> + MachineInstr *DefMI = (DefIdx == ~0u)
>> + ? NULL : getInstructionFromIndex(DefIdx);
>> + if (DefMI && isReMaterializable(li, i, DefMI)) {
>> + // Remember how to remat the def of this val#.
>> + ReMatOrigDefs[i] = DefMI;
>> + // Original def may be modified so we have to make a copy
>> here. vrm must
>> + // delete these!
>> + ReMatDefs[i] = DefMI = DefMI->clone();
>> + vrm.setVirtIsReMaterialized(reg, DefMI);
>> +
>> + bool CanDelete = true;
>> + const SmallVector<unsigned, 4> &kills = li.getKillsForValNum
>> (i);
>> + for (unsigned j = 0, ee = kills.size(); j != ee; ++j) {
>> + unsigned KillIdx = kills[j];
>> + MachineInstr *KillMI = (KillIdx & 1)
>> + ? NULL : getInstructionFromIndex(KillIdx);
>> + // Kill is a phi node, not all of its uses can be
>> rematerialized.
>> + // It must not be deleted.
>> + if (!KillMI) {
>> + CanDelete = false;
>> + // Need a stack slot if there is any live range where
>> uses cannot be
>> + // rematerialized.
>> + NeedStackSlot = true;
>> + break;
>> + }
>> + }
>> +
>> + if (CanDelete)
>> + ReMatDelete.set(i);
>> + } else {
>> + // Need a stack slot if there is any live range where uses
>> cannot be
>> + // rematerialized.
>> + NeedStackSlot = true;
>> + }
>> + }
>> +
>> + // One stack slot per live interval.
>> + if (NeedStackSlot)
>> + slot = vrm.assignVirt2StackSlot(reg);
>> +
>> for (LiveInterval::Ranges::const_iterator
>> - i = li.ranges.begin(), e = li.ranges.end(); i != e; ++i) {
>> - unsigned index = getBaseIndex(i->start);
>> - unsigned end = getBaseIndex(i->end-1) + InstrSlots::NUM;
>> + I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
>> + MachineInstr *DefMI = ReMatDefs[I->ValId];
>> + MachineInstr *OrigDefMI = ReMatOrigDefs[I->ValId];
>> + bool DefIsReMat = DefMI != NULL;
>> + bool CanDelete = ReMatDelete[I->ValId];
>> + int LdSlot = 0;
>> + bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot
>> (DefMI, LdSlot);
>> + unsigned index = getBaseIndex(I->start);
>> + unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
>> for (; index != end; index += InstrSlots::NUM) {
>> // skip deleted instructions
>> while (index != end && !getInstructionFromIndex(index))
>> @@ -208,87 +341,109 @@
>> for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
>> MachineOperand& mop = MI->getOperand(i);
>> if (mop.isRegister() && mop.getReg() == li.reg) {
>> - MachineInstr *fmi = li.remat ? NULL
>> - : mri_->foldMemoryOperand(MI, i, slot);
>> - if (fmi) {
>> - // Attempt to fold the memory reference into the
>> instruction. If we
>> - // can do this, we don't need to insert spill code.
>> - if (lv_)
>> - lv_->instructionChanged(MI, fmi);
>> - MachineBasicBlock &MBB = *MI->getParent();
>> - vrm.virtFolded(li.reg, MI, i, fmi);
>> - mi2iMap_.erase(MI);
>> - i2miMap_[index/InstrSlots::NUM] = fmi;
>> - mi2iMap_[fmi] = index;
>> - MI = MBB.insert(MBB.erase(MI), fmi);
>> - ++numFolded;
>> - // Folding the load/store can completely change the
>> instruction in
>> - // unpredictable ways, rescan it from the beginning.
>> - goto RestartInstruction;
>> + if (DefIsReMat) {
>> + // If this is the rematerializable definition MI
>> itself and
>> + // all of its uses are rematerialized, simply delete it.
>> + if (MI == OrigDefMI) {
>> + if (CanDelete) {
>> + RemoveMachineInstrFromMaps(MI);
>> + MI->eraseFromParent();
>> + break;
>> + } else if (tryFoldMemoryOperand(MI, vrm, index, i,
>> slot, li.reg))
>> + // Folding the load/store can completely change
>> the instruction
>> + // in unpredictable ways, rescan it from the
>> beginning.
>> + goto RestartInstruction;
>> + } else if (isLoadSS &&
>> + tryFoldMemoryOperand(MI, vrm, index, i,
>> LdSlot, li.reg)){
>> + // FIXME: Other rematerializable loads can be
>> folded as well.
>> + // Folding the load/store can completely change the
>> + // instruction in unpredictable ways, rescan it from
>> + // the beginning.
>> + goto RestartInstruction;
>> + }
>> } else {
>> - // Create a new virtual register for the spill interval.
>> - unsigned NewVReg = mf_->getSSARegMap()-
>> >createVirtualRegister(rc);
>> + if (tryFoldMemoryOperand(MI, vrm, index, i, slot,
>> li.reg))
>> + // Folding the load/store can completely change the
>> instruction in
>> + // unpredictable ways, rescan it from the beginning.
>> + goto RestartInstruction;
>> + }
>> +
>> + // Create a new virtual register for the spill interval.
>> + unsigned NewVReg = mf_->getSSARegMap()-
>> >createVirtualRegister(rc);
>>
>> - // Scan all of the operands of this instruction
>> rewriting operands
>> - // to use NewVReg instead of li.reg as appropriate.
>> We do this for
>> - // two reasons:
>> - //
>> - // 1. If the instr reads the same spilled vreg
>> multiple times, we
>> - // want to reuse the NewVReg.
>> - // 2. If the instr is a two-addr instruction, we
>> are required to
>> - // keep the src/dst regs pinned.
>> - //
>> - // Keep track of whether we replace a use and/or def
>> so that we can
>> - // create the spill interval with the appropriate range.
>> - mop.setReg(NewVReg);
>> + // Scan all of the operands of this instruction
>> rewriting operands
>> + // to use NewVReg instead of li.reg as appropriate. We
>> do this for
>> + // two reasons:
>> + //
>> + // 1. If the instr reads the same spilled vreg
>> multiple times, we
>> + // want to reuse the NewVReg.
>> + // 2. If the instr is a two-addr instruction, we are
>> required to
>> + // keep the src/dst regs pinned.
>> + //
>> + // Keep track of whether we replace a use and/or def so
>> that we can
>> + // create the spill interval with the appropriate range.
>> + mop.setReg(NewVReg);
>>
>> - bool HasUse = mop.isUse();
>> - bool HasDef = mop.isDef();
>> - for (unsigned j = i+1, e = MI->getNumOperands(); j !=
>> e; ++j) {
>> - if (MI->getOperand(j).isReg() &&
>> - MI->getOperand(j).getReg() == li.reg) {
>> - MI->getOperand(j).setReg(NewVReg);
>> - HasUse |= MI->getOperand(j).isUse();
>> - HasDef |= MI->getOperand(j).isDef();
>> - }
>> + bool HasUse = mop.isUse();
>> + bool HasDef = mop.isDef();
>> + for (unsigned j = i+1, e = MI->getNumOperands(); j !=
>> e; ++j) {
>> + if (MI->getOperand(j).isReg() &&
>> + MI->getOperand(j).getReg() == li.reg) {
>> + MI->getOperand(j).setReg(NewVReg);
>> + HasUse |= MI->getOperand(j).isUse();
>> + HasDef |= MI->getOperand(j).isDef();
>> }
>> + }
>>
>> - // create a new register for this spill
>> - vrm.grow();
>> - if (li.remat)
>> - vrm.setVirtIsReMaterialized(NewVReg, li.remat);
>> - vrm.assignVirt2StackSlot(NewVReg, slot);
>> - LiveInterval &nI = getOrCreateInterval(NewVReg);
>> - nI.remat = li.remat;
>> - assert(nI.empty());
>> -
>> - // the spill weight is now infinity as it
>> - // cannot be spilled again
>> - nI.weight = HUGE_VALF;
>> -
>> - if (HasUse) {
>> - LiveRange LR(getLoadIndex(index), getUseIndex(index),
>> - nI.getNextValue(~0U, 0));
>> - DOUT << " +" << LR;
>> - nI.addRange(LR);
>> + vrm.grow();
>> + if (DefIsReMat) {
>> + vrm.setVirtIsReMaterialized(NewVReg, DefMI/*,
>> CanDelete*/);
>> + if (ReMatIds[I->ValId] == VirtRegMap::MAX_STACK_SLOT) {
>> + // Each valnum may have its own remat id.
>> + ReMatIds[I->ValId] = vrm.assignVirtReMatId(NewVReg);
>> + } else {
>> + vrm.assignVirtReMatId(NewVReg, ReMatIds[I->ValId]);
>> }
>> - if (HasDef) {
>> - LiveRange LR(getDefIndex(index), getStoreIndex(index),
>> - nI.getNextValue(~0U, 0));
>> - DOUT << " +" << LR;
>> - nI.addRange(LR);
>> + if (!CanDelete || (HasUse && HasDef)) {
>> + // If this is a two-addr instruction then its use
>> operands are
>> + // rematerializable but its def is not. It should
>> be assigned a
>> + // stack slot.
>> + vrm.assignVirt2StackSlot(NewVReg, slot);
>> }
>> + } else {
>> + vrm.assignVirt2StackSlot(NewVReg, slot);
>> + }
>> +
>> + // create a new register interval for this spill / remat.
>> + LiveInterval &nI = getOrCreateInterval(NewVReg);
>> + assert(nI.empty());
>> +
>> + // the spill weight is now infinity as it
>> + // cannot be spilled again
>> + nI.weight = HUGE_VALF;
>> +
>> + if (HasUse) {
>> + LiveRange LR(getLoadIndex(index), getUseIndex(index),
>> + nI.getNextValue(~0U, 0));
>> + DOUT << " +" << LR;
>> + nI.addRange(LR);
>> + }
>> + if (HasDef) {
>> + LiveRange LR(getDefIndex(index), getStoreIndex(index),
>> + nI.getNextValue(~0U, 0));
>> + DOUT << " +" << LR;
>> + nI.addRange(LR);
>> + }
>>
>> - added.push_back(&nI);
>> + added.push_back(&nI);
>>
>> - // update live variables if it is available
>> - if (lv_)
>> - lv_->addVirtualRegisterKilled(NewVReg, MI);
>> + // update live variables if it is available
>> + if (lv_)
>> + lv_->addVirtualRegisterKilled(NewVReg, MI);
>>
>> - DOUT << "\t\t\t\tadded new interval: ";
>> - nI.print(DOUT, mri_);
>> - DOUT << '\n';
>> - }
>> + DOUT << "\t\t\t\tadded new interval: ";
>> + nI.print(DOUT, mri_);
>> + DOUT << '\n';
>> }
>> }
>> }
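The rewritten addIntervalsForSpills above first builds a per-value-number plan: a val# whose def is rematerializable and whose kills are all real instructions can have its original def deleted outright, while any val# that fails either test forces the interval to keep one stack slot. A standalone sketch of that decision, with invented toy types rather than LiveInterval and VirtRegMap:

#include <cassert>
#include <cstddef>
#include <vector>

struct ValNum {
  bool DefIsRemat;        // the defining instruction can be re-emitted at uses
  bool AllKillsAreInstrs; // no phi-node kills, which cannot be rematerialized
};

struct SpillPlan {
  std::vector<bool> DeleteDef; // per val#: the original def can be erased
  bool NeedStackSlot = false;  // at least one val# still needs memory
};

static SpillPlan planSpill(const std::vector<ValNum> &Vals) {
  SpillPlan P;
  P.DeleteDef.resize(Vals.size(), false);
  for (std::size_t i = 0, e = Vals.size(); i != e; ++i) {
    if (Vals[i].DefIsRemat && Vals[i].AllKillsAreInstrs)
      P.DeleteDef[i] = true;   // every use is re-emitted, the def is dead
    else
      P.NeedStackSlot = true;  // some use must still go through memory
  }
  return P;
}

int main() {
  // val#0 is fully rematerializable; val#1 is killed at a phi join, so the
  // interval still gets one stack slot and that def is kept.
  SpillPlan P = planSpill({{true, true}, {true, false}});
  assert(P.DeleteDef[0] && !P.DeleteDef[1] && P.NeedStackSlot);
  return 0;
}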
>> @@ -304,25 +459,6 @@
>> cerr << "%reg" << reg;
>> }
>>
>> -/// isReDefinedByTwoAddr - Returns true if the Reg re-definition
>> is due to
>> -/// two addr elimination.
>> -static bool isReDefinedByTwoAddr(MachineInstr *MI, unsigned Reg,
>> - const TargetInstrInfo *TII) {
>> - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
>> - MachineOperand &MO1 = MI->getOperand(i);
>> - if (MO1.isRegister() && MO1.isDef() && MO1.getReg() == Reg) {
>> - for (unsigned j = i+1; j < e; ++j) {
>> - MachineOperand &MO2 = MI->getOperand(j);
>> - if (MO2.isRegister() && MO2.isUse() && MO2.getReg() ==
>> Reg &&
>> - MI->getInstrDescriptor()->
>> - getOperandConstraint(j, TOI::TIED_TO) == (int)i)
>> - return true;
>> - }
>> - }
>> - }
>> - return false;
>> -}
>> -
>> void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
>>
>> MachineBasicBlock::iterator mi,
>> unsigned MIIdx,
>> @@ -335,16 +471,6 @@
>> // done once for the vreg. We use an empty interval to detect
>> the first
>> // time we see a vreg.
>> if (interval.empty()) {
>> - // Remember if the definition can be rematerialized. All
>> load's from fixed
>> - // stack slots are re-materializable. The target may permit
>> other
>> - // instructions to be re-materialized as well.
>> - int FrameIdx = 0;
>> - if (vi.DefInst &&
>> - (tii_->isTriviallyReMaterializable(vi.DefInst) ||
>> - (tii_->isLoadFromStackSlot(vi.DefInst, FrameIdx) &&
>> - mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))))
>> - interval.remat = vi.DefInst;
>> -
>> // Get the Idx of the defining instructions.
>> unsigned defIndex = getDefIndex(MIIdx);
>> unsigned ValNum;
>> @@ -421,9 +547,6 @@
>> }
>>
>> } else {
>> - // Can no longer safely assume definition is rematerializable.
>> - interval.remat = NULL;
>> -
>> // If this is the second time we see a virtual register
>> definition, it
>> // must be due to phi elimination or two addr elimination.
>> If this is
>> // the result of two address elimination, then the vreg is
>> one of the
>> @@ -487,7 +610,7 @@
>> DOUT << " Removing [" << Start << "," << End << "] from: ";
>> interval.print(DOUT, mri_); DOUT << "\n";
>> interval.removeRange(Start, End);
>> - interval.addKillForValNum(0, Start);
>> + interval.addKillForValNum(0, Start-1); // odd # means phi
>> node
>> DOUT << " RESULT: "; interval.print(DOUT, mri_);
>>
>> // Replace the interval with one of a NEW value number.
>> Note that this
>> @@ -514,7 +637,7 @@
>> unsigned killIndex = getInstructionIndex(&mbb->back()) +
>> InstrSlots::NUM;
>> LiveRange LR(defIndex, killIndex, ValNum);
>> interval.addRange(LR);
>> - interval.addKillForValNum(ValNum, killIndex);
>> + interval.addKillForValNum(ValNum, killIndex-1); // odd #
>> means phi node
>> DOUT << " +" << LR;
>> }
>> }
>>
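The last two hunks above store kill indices as the index minus one and note that an odd number marks a phi-node kill; the (KillIdx & 1) test added earlier in this patch reads that tag back before trying to look the instruction up. Instruction numbers are handed out in steps of InstrSlots::NUM, so the block indices involved here are even and the odd value can never name a real instruction. A small standalone illustration of the parity-tag idea (toy names, not LLVM code):

#include <cassert>

// Real instruction slots advance by a fixed stride, so they are even; an odd
// kill index is therefore free to mean "killed at a phi join, no instruction
// to look up".
const unsigned Stride = 4; // plays the role of InstrSlots::NUM

unsigned phiKillTag(unsigned blockEndIdx) { return blockEndIdx - 1; } // odd
bool isPhiKill(unsigned killIdx) { return (killIdx & 1) != 0; }

int main() {
  unsigned realIdx = 6 * Stride;          // a normal instruction slot
  unsigned phiKill = phiKillTag(realIdx); // tagged value, not a real slot
  assert(!isPhiKill(realIdx));
  assert(isPhiKill(phiKill));
  return 0;
}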
>> Modified: llvm/trunk/lib/CodeGen/RegAllocLinearScan.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/
>> RegAllocLinearScan.cpp?rev=41060&r1=41059&r2=41060&view=diff
>>
>> =====================================================================
>> =========
>> --- llvm/trunk/lib/CodeGen/RegAllocLinearScan.cpp (original)
>> +++ llvm/trunk/lib/CodeGen/RegAllocLinearScan.cpp Mon Aug 13
>> 18:45:17 2007
>> @@ -305,7 +305,7 @@
>> for (unsigned i = 0, e = handled_.size(); i != e; ++i) {
>> LiveInterval *HI = handled_[i];
>> unsigned Reg = HI->reg;
>> - if (!vrm_->hasStackSlot(Reg) && HI->liveAt(StartIdx)) {
>> + if (vrm_->isAssignedReg(Reg) && HI->liveAt(StartIdx)) {
>> assert(MRegisterInfo::isVirtualRegister(Reg));
>> Reg = vrm_->getPhys(Reg);
>> MBB->addLiveIn(Reg);
>> @@ -605,14 +605,8 @@
>> // linearscan.
>> if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
>> DOUT << "\t\t\tspilling(c): " << *cur << '\n';
>> - // if the current interval is re-materializable, remember so
>> and don't
>> - // assign it a spill slot.
>> - if (cur->remat)
>> - vrm_->setVirtIsReMaterialized(cur->reg, cur->remat);
>> - int slot = cur->remat ? vrm_->assignVirtReMatId(cur->reg)
>> - : vrm_->assignVirt2StackSlot(cur->reg);
>> std::vector<LiveInterval*> added =
>> - li_->addIntervalsForSpills(*cur, *vrm_, slot);
>> + li_->addIntervalsForSpills(*cur, *vrm_, cur->reg);
>> if (added.empty())
>> return; // Early exit if all spills were folded.
>>
>> @@ -663,12 +657,8 @@
>> cur->overlapsFrom(*i->first, i->second)) {
>> DOUT << "\t\t\tspilling(a): " << *i->first << '\n';
>> earliestStart = std::min(earliestStart, i->first-
>> >beginNumber());
>> - if (i->first->remat)
>> - vrm_->setVirtIsReMaterialized(reg, i->first->remat);
>> - int slot = i->first->remat ? vrm_->assignVirtReMatId(reg)
>> - : vrm_->assignVirt2StackSlot(reg);
>> std::vector<LiveInterval*> newIs =
>> - li_->addIntervalsForSpills(*i->first, *vrm_, slot);
>> + li_->addIntervalsForSpills(*i->first, *vrm_, reg);
>> std::copy(newIs.begin(), newIs.end(), std::back_inserter
>> (added));
>> spilled.insert(reg);
>> }
>> @@ -680,12 +670,8 @@
>> cur->overlapsFrom(*i->first, i->second-1)) {
>> DOUT << "\t\t\tspilling(i): " << *i->first << '\n';
>> earliestStart = std::min(earliestStart, i->first-
>> >beginNumber());
>> - if (i->first->remat)
>> - vrm_->setVirtIsReMaterialized(reg, i->first->remat);
>> - int slot = i->first->remat ? vrm_->assignVirtReMatId(reg)
>> - : vrm_->assignVirt2StackSlot(reg);
>> std::vector<LiveInterval*> newIs =
>> - li_->addIntervalsForSpills(*i->first, *vrm_, slot);
>> + li_->addIntervalsForSpills(*i->first, *vrm_, reg);
>> std::copy(newIs.begin(), newIs.end(), std::back_inserter
>> (added));
>> spilled.insert(reg);
>> }
>>
>> Modified: llvm/trunk/lib/CodeGen/SimpleRegisterCoalescing.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/
>> SimpleRegisterCoalescing.cpp?rev=41060&r1=41059&r2=41060&view=diff
>>
>> =====================================================================
>> =========
>> --- llvm/trunk/lib/CodeGen/SimpleRegisterCoalescing.cpp (original)
>> +++ llvm/trunk/lib/CodeGen/SimpleRegisterCoalescing.cpp Mon Aug 13
>> 18:45:17 2007
>> @@ -1123,12 +1123,6 @@
>> continue;
>> LiveInterval &RegInt = li_->getInterval(reg);
>> float w = (mop.isUse()+mop.isDef()) * powf(10.0F,
>> (float)loopDepth);
>> - // If the definition instruction is re-
>> materializable, its spill
>> - // weight is half of what it would have been normally
>> unless it's
>> - // a load from fixed stack slot.
>> - int Dummy;
>> - if (RegInt.remat && !tii_->isLoadFromStackSlot
>> (RegInt.remat, Dummy))
>> - w /= 2;
>> RegInt.weight += w;
>> UniqueUses.insert(reg);
>> }
>>
>> Modified: llvm/trunk/lib/CodeGen/VirtRegMap.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/
>> VirtRegMap.cpp?rev=41060&r1=41059&r2=41060&view=diff
>>
>> =====================================================================
>> =========
>> --- llvm/trunk/lib/CodeGen/VirtRegMap.cpp (original)
>> +++ llvm/trunk/lib/CodeGen/VirtRegMap.cpp Mon Aug 13 18:45:17 2007
>> @@ -62,13 +62,17 @@
>> VirtRegMap::VirtRegMap(MachineFunction &mf)
>> : TII(*mf.getTarget().getInstrInfo()), MF(mf),
>> Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
>> + Virt2ReMatIdMap(NO_STACK_SLOT), ReMatMap(NULL),
>> ReMatId(MAX_STACK_SLOT+1) {
>> grow();
>> }
>>
>> void VirtRegMap::grow() {
>> - Virt2PhysMap.grow(MF.getSSARegMap()->getLastVirtReg());
>> - Virt2StackSlotMap.grow(MF.getSSARegMap()->getLastVirtReg());
>> + unsigned LastVirtReg = MF.getSSARegMap()->getLastVirtReg();
>> + Virt2PhysMap.grow(LastVirtReg);
>> + Virt2StackSlotMap.grow(LastVirtReg);
>> + Virt2ReMatIdMap.grow(LastVirtReg);
>> + ReMatMap.grow(LastVirtReg);
>> }
>>
>> int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
>> @@ -95,19 +99,19 @@
>>
>> int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
>> assert(MRegisterInfo::isVirtualRegister(virtReg));
>> - assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
>> + assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
>> "attempt to assign re-mat id to already spilled register");
>> - const MachineInstr *DefMI = getReMaterializedMI(virtReg);
>> - int FrameIdx;
>> - if (TII.isLoadFromStackSlot((MachineInstr*)DefMI, FrameIdx)) {
>> - // Load from stack slot is re-materialize as reload from the
>> stack slot!
>> - Virt2StackSlotMap[virtReg] = FrameIdx;
>> - return FrameIdx;
>> - }
>> - Virt2StackSlotMap[virtReg] = ReMatId;
>> + Virt2ReMatIdMap[virtReg] = ReMatId;
>> return ReMatId++;
>> }
>>
>> +void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
>> + assert(MRegisterInfo::isVirtualRegister(virtReg));
>> + assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
>> + "attempt to assign re-mat id to already spilled register");
>> + Virt2ReMatIdMap[virtReg] = id;
>> +}
>> +
>> void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
>> unsigned OpNo, MachineInstr *NewMI) {
>> // Move previous memory references folded to new instruction.
>> @@ -194,7 +198,7 @@
>> if (MRegisterInfo::isVirtualRegister(MO.getReg())) {
>> unsigned VirtReg = MO.getReg();
>> unsigned PhysReg = VRM.getPhys(VirtReg);
>> - if (VRM.hasStackSlot(VirtReg)) {
>> + if (!VRM.isAssignedReg(VirtReg)) {
>> int StackSlot = VRM.getStackSlot(VirtReg);
>> const TargetRegisterClass* RC =
>> MF.getSSARegMap()->getRegClass(VirtReg);
>> @@ -246,43 +250,41 @@
>> DOUT << "\n**** Local spiller rewriting function '"
>> << MF.getFunction()->getName() << "':\n";
>>
>> - std::vector<MachineInstr *> ReMatedMIs;
>> for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
>> MBB != E; ++MBB)
>> - RewriteMBB(*MBB, VRM, ReMatedMIs);
>> - for (unsigned i = 0, e = ReMatedMIs.size(); i != e; ++i)
>> - delete ReMatedMIs[i];
>> + RewriteMBB(*MBB, VRM);
>> return true;
>> }
>> private:
>> - void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
>> - std::vector<MachineInstr*> &ReMatedMIs);
>> + void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
>> };
>> }
>>
>> /// AvailableSpills - As the local spiller is scanning and
>> rewriting an MBB from
>> -/// top down, keep track of which spills slots are available in
>> each register.
>> +/// top down, keep track of which spills slots or remat are
>> available in each
>> +/// register.
>> ///
>> /// Note that not all physregs are created equal here. In
>> particular, some
>> /// physregs are reloads that we are allowed to clobber or ignore
>> at any time.
>> /// Other physregs are values that the register allocated program
>> is using that
>> /// we cannot CHANGE, but we can read if we like. We keep track
>> of this on a
>> -/// per-stack-slot basis as the low bit in the value of the
>> SpillSlotsAvailable
>> -/// entries. The predicate 'canClobberPhysReg()' checks this bit
>> and
>> -/// addAvailable sets it if.
>> +/// per-stack-slot / remat id basis as the low bit in the value
>> of the
>> +/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg
>> ()' checks
>> +/// this bit and addAvailable sets it if.
>> namespace {
>> class VISIBILITY_HIDDEN AvailableSpills {
>> const MRegisterInfo *MRI;
>> const TargetInstrInfo *TII;
>>
>> - // SpillSlotsAvailable - This map keeps track of all of the
>> spilled virtual
>> - // register values that are still available, due to being
>> loaded or stored to,
>> - // but not invalidated yet.
>> - std::map<int, unsigned> SpillSlotsAvailable;
>> + // SpillSlotsOrReMatsAvailable - This map keeps track of all of
>> the spilled
>> + // or remat'ed virtual register values that are still
>> available, due to being
>> + // loaded or stored to, but not invalidated yet.
>> + std::map<int, unsigned> SpillSlotsOrReMatsAvailable;
>>
>> - // PhysRegsAvailable - This is the inverse of
>> SpillSlotsAvailable, indicating
>> - // which stack slot values are currently held by a physreg.
>> This is used to
>> - // invalidate entries in SpillSlotsAvailable when a physreg is
>> modified.
>> + // PhysRegsAvailable - This is the inverse of
>> SpillSlotsOrReMatsAvailable,
>> + // indicating which stack slot values are currently held by a
>> physreg. This
>> + // is used to invalidate entries in SpillSlotsOrReMatsAvailable
>> when a
>> + // physreg is modified.
>> std::multimap<unsigned, int> PhysRegsAvailable;
>>
>> void disallowClobberPhysRegOnly(unsigned PhysReg);
>> @@ -295,41 +297,43 @@
>>
>> const MRegisterInfo *getRegInfo() const { return MRI; }
>>
>> - /// getSpillSlotPhysReg - If the specified stack slot is
>> available in a
>> - /// physical register, return that PhysReg, otherwise return 0.
>> - unsigned getSpillSlotPhysReg(int Slot) const {
>> - std::map<int, unsigned>::const_iterator I =
>> SpillSlotsAvailable.find(Slot);
>> - if (I != SpillSlotsAvailable.end()) {
>> + /// getSpillSlotOrReMatPhysReg - If the specified stack slot or
>> remat is
>> + /// available in a physical register, return that PhysReg,
>> otherwise
>> + /// return 0.
>> + unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
>> + std::map<int, unsigned>::const_iterator I =
>> + SpillSlotsOrReMatsAvailable.find(Slot);
>> + if (I != SpillSlotsOrReMatsAvailable.end()) {
>> return I->second >> 1; // Remove the CanClobber bit.
>> }
>> return 0;
>> }
>>
>> - /// addAvailable - Mark that the specified stack slot is
>> available in the
>> - /// specified physreg. If CanClobber is true, the physreg can
>> be modified at
>> - /// any time without changing the semantics of the program.
>> - void addAvailable(int Slot, MachineInstr *MI, unsigned Reg,
>> + /// addAvailable - Mark that the specified stack slot / remat
>> is available in
>> + /// the specified physreg. If CanClobber is true, the physreg
>> can be modified
>> + /// at any time without changing the semantics of the program.
>> + void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
>> bool CanClobber = true) {
>> // If this stack slot is thought to be available in some
>> other physreg,
>> // remove its record.
>> - ModifyStackSlot(Slot);
>> + ModifyStackSlotOrReMat(SlotOrReMat);
>>
>> - PhysRegsAvailable.insert(std::make_pair(Reg, Slot));
>> - SpillSlotsAvailable[Slot] = (Reg << 1) | (unsigned)CanClobber;
>> + PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
>> + SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
>> (unsigned)CanClobber;
>>
>> - if (Slot > VirtRegMap::MAX_STACK_SLOT)
>> - DOUT << "Remembering RM#" << Slot-
>> VirtRegMap::MAX_STACK_SLOT-1;
>> + if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
>> + DOUT << "Remembering RM#" << SlotOrReMat-
>> VirtRegMap::MAX_STACK_SLOT-1;
>> else
>> - DOUT << "Remembering SS#" << Slot;
>> + DOUT << "Remembering SS#" << SlotOrReMat;
>> DOUT << " in physreg " << MRI->getName(Reg) << "\n";
>> }
>>
>> /// canClobberPhysReg - Return true if the spiller is allowed
>> to change the
>> /// value of the specified stackslot register if it desires.
>> The specified
>> /// stack slot must be available in a physreg for this query to
>> make sense.
>> - bool canClobberPhysReg(int Slot) const {
>> - assert(SpillSlotsAvailable.count(Slot) && "Slot not
>> available!");
>> - return SpillSlotsAvailable.find(Slot)->second & 1;
>> + bool canClobberPhysReg(int SlotOrReMat) const {
>> + assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
>> "Value not available!");
>> + return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second
>> & 1;
>> }
>>
>> /// disallowClobberPhysReg - Unset the CanClobber bit of the
>> specified
>> @@ -342,10 +346,10 @@
>> /// it and any of its aliases.
>> void ClobberPhysReg(unsigned PhysReg);
>>
>> - /// ModifyStackSlot - This method is called when the value in a
>> stack slot
>> + /// ModifyStackSlotOrReMat - This method is called when the
>> value in a stack slot
>> /// changes. This removes information about which register the
>> previous value
>> /// for this slot lives in (as the previous value is dead now).
>> - void ModifyStackSlot(int Slot);
>> + void ModifyStackSlotOrReMat(int SlotOrReMat);
>> };
>> }
>>
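The renaming above reflects that AvailableSpills now tracks values by either a stack slot number or a remat id, and the mapped value still packs the physical register together with a can-clobber flag in bit 0, as the (Reg << 1) | CanClobber line in addAvailable shows. A standalone toy version of that map (class and values invented for illustration):

#include <cassert>
#include <map>

// Toy availability map: the key is either a stack slot number or a remat id,
// and the value packs the physical register above a "can clobber" flag bit.
class ToyAvailableSpills {
  std::map<int, unsigned> Available; // slot-or-remat-id -> (PhysReg << 1) | CanClobber
public:
  void addAvailable(int SlotOrReMat, unsigned PhysReg, bool CanClobber = true) {
    Available[SlotOrReMat] = (PhysReg << 1) | unsigned(CanClobber);
  }
  unsigned getPhysReg(int SlotOrReMat) const {
    auto I = Available.find(SlotOrReMat);
    return I == Available.end() ? 0 : I->second >> 1; // strip the flag bit
  }
  bool canClobber(int SlotOrReMat) const {
    auto I = Available.find(SlotOrReMat);
    assert(I != Available.end() && "Value not available!");
    return I->second & 1;
  }
};

int main() {
  ToyAvailableSpills Spills;
  Spills.addAvailable(/*stack slot*/ 3, /*PhysReg*/ 17, /*CanClobber*/ false);
  assert(Spills.getPhysReg(3) == 17 && !Spills.canClobber(3));
  assert(Spills.getPhysReg(99) == 0); // not available in any physreg
  return 0;
}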
>> @@ -356,11 +360,11 @@
>> std::multimap<unsigned, int>::iterator I =
>> PhysRegsAvailable.lower_bound(PhysReg);
>> while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
>> - int Slot = I->second;
>> + int SlotOrReMat = I->second;
>> I++;
>> - assert((SpillSlotsAvailable[Slot] >> 1) == PhysReg &&
>> + assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) ==
>> PhysReg &&
>> "Bidirectional map mismatch!");
>> - SpillSlotsAvailable[Slot] &= ~1;
>> + SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
>> DOUT << "PhysReg " << MRI->getName(PhysReg)
>> << " copied, it is available for use but can no longer
>> be modified\n";
>> }
>> @@ -381,17 +385,17 @@
>> std::multimap<unsigned, int>::iterator I =
>> PhysRegsAvailable.lower_bound(PhysReg);
>> while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
>> - int Slot = I->second;
>> + int SlotOrReMat = I->second;
>> PhysRegsAvailable.erase(I++);
>> - assert((SpillSlotsAvailable[Slot] >> 1) == PhysReg &&
>> + assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) ==
>> PhysReg &&
>> "Bidirectional map mismatch!");
>> - SpillSlotsAvailable.erase(Slot);
>> + SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
>> DOUT << "PhysReg " << MRI->getName(PhysReg)
>> << " clobbered, invalidating ";
>> - if (Slot > VirtRegMap::MAX_STACK_SLOT)
>> - DOUT << "RM#" << Slot-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
>> + if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
>> + DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1
>> << "\n";
>> else
>> - DOUT << "SS#" << Slot << "\n";
>> + DOUT << "SS#" << SlotOrReMat << "\n";
>> }
>> }
>>
>> @@ -404,14 +408,14 @@
>> ClobberPhysRegOnly(PhysReg);
>> }
>>
>> -/// ModifyStackSlot - This method is called when the value in a
>> stack slot
>> +/// ModifyStackSlotOrReMat - This method is called when the value
>> in a stack slot
>> /// changes. This removes information about which register the
>> previous value
>> /// for this slot lives in (as the previous value is dead now).
>> -void AvailableSpills::ModifyStackSlot(int Slot) {
>> - std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find
>> (Slot);
>> - if (It == SpillSlotsAvailable.end()) return;
>> +void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
>> + std::map<int, unsigned>::iterator It =
>> SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
>> + if (It == SpillSlotsOrReMatsAvailable.end()) return;
>> unsigned Reg = It->second >> 1;
>> - SpillSlotsAvailable.erase(It);
>> + SpillSlotsOrReMatsAvailable.erase(It);
>>
>> // This register may hold the value of multiple stack slots,
>> only remove this
>> // stack slot from the set of values the register contains.
>> @@ -419,7 +423,7 @@
>> for (; ; ++I) {
>> assert(I != PhysRegsAvailable.end() && I->first == Reg &&
>> "Map inverse broken!");
>> - if (I->second == Slot) break;
>> + if (I->second == SlotOrReMat) break;
>> }
>> PhysRegsAvailable.erase(I);
>> }
>> @@ -490,8 +494,8 @@
>> // The MachineInstr operand that reused an available value.
>> unsigned Operand;
>>
>> - // StackSlot - The spill slot of the value being reused.
>> - unsigned StackSlot;
>> + // StackSlotOrReMat - The spill slot or remat id of the value
>> being reused.
>> + unsigned StackSlotOrReMat;
>>
>> // PhysRegReused - The physical register the value was
>> available in.
>> unsigned PhysRegReused;
>> @@ -504,7 +508,7 @@
>>
>> ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
>> unsigned vreg)
>> - : Operand(o), StackSlot(ss), PhysRegReused(prr),
>> AssignedPhysReg(apr),
>> + : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
>> AssignedPhysReg(apr),
>> VirtReg(vreg) {}
>> };
>>
>> @@ -525,7 +529,7 @@
>>
>> /// addReuse - If we choose to reuse a virtual register that
>> is already
>> /// available instead of reloading it, remember that we did so.
>> - void addReuse(unsigned OpNo, unsigned StackSlot,
>> + void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
>> unsigned PhysRegReused, unsigned AssignedPhysReg,
>> unsigned VirtReg) {
>> // If the reload is to the assigned register anyway, no
>> undo will be
>> @@ -533,7 +537,7 @@
>> if (PhysRegReused == AssignedPhysReg) return;
>>
>> // Otherwise, remember this.
>> - Reuses.push_back(ReusedOp(OpNo, StackSlot, PhysRegReused,
>> + Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat,
>> PhysRegReused,
>> AssignedPhysReg, VirtReg));
>> }
>>
>> @@ -553,7 +557,8 @@
>> std::map<int, MachineInstr*>
>> &MaybeDeadStores,
>> SmallSet<unsigned, 8> &Rejected,
>> BitVector &RegKills,
>> - std::vector<MachineOperand*>
>> &KillOps) {
>> + std::vector<MachineOperand*> &KillOps,
>> + VirtRegMap &VRM) {
>> if (Reuses.empty()) return PhysReg; // This is most often
>> empty.
>>
>> for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
>> @@ -569,7 +574,7 @@
>> unsigned NewReg = Op.AssignedPhysReg;
>> Rejected.insert(PhysReg);
>> return GetRegForReload(NewReg, MI, Spills,
>> MaybeDeadStores, Rejected,
>> - RegKills, KillOps);
>> + RegKills, KillOps, VRM);
>> } else {
>> // Otherwise, we might also have a problem if a
>> previously reused
>> // value aliases the new register. If so, codegen the
>> previous reload
>> @@ -595,20 +600,26 @@
>> // would prefer us to use a different register.
>> unsigned NewPhysReg = GetRegForReload
>> (NewOp.AssignedPhysReg,
>> MI, Spills,
>> MaybeDeadStores,
>> - Rejected,
>> RegKills, KillOps);
>> + Rejected, RegKills,
>> KillOps, VRM);
>>
>> - MRI->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
>> - NewOp.StackSlot, AliasRC);
>> + if (NewOp.StackSlotOrReMat >
>> VirtRegMap::MAX_STACK_SLOT) {
>> + MRI->reMaterialize(*MBB, MI, NewPhysReg,
>> + VRM.getReMaterializedMI
>> (NewOp.VirtReg));
>> + ++NumReMats;
>> + } else {
>> + MRI->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
>> + NewOp.StackSlotOrReMat,
>> AliasRC);
>> + ++NumLoads;
>> + }
>> Spills.ClobberPhysReg(NewPhysReg);
>> Spills.ClobberPhysReg(NewOp.PhysRegReused);
>>
>> // Any stores to this stack slot are not dead anymore.
>> - MaybeDeadStores.erase(NewOp.StackSlot);
>> + MaybeDeadStores.erase(NewOp.StackSlotOrReMat);
>>
>> MI->getOperand(NewOp.Operand).setReg(NewPhysReg);
>>
>> - Spills.addAvailable(NewOp.StackSlot, MI, NewPhysReg);
>> - ++NumLoads;
>> + Spills.addAvailable(NewOp.StackSlotOrReMat, MI,
>> NewPhysReg);
>> MachineBasicBlock::iterator MII = MI;
>> --MII;
>> UpdateKills(*MII, RegKills, KillOps);
>> @@ -640,10 +651,11 @@
>> AvailableSpills &Spills,
>> std::map<int, MachineInstr*>
>> &MaybeDeadStores,
>> BitVector &RegKills,
>> - std::vector<MachineOperand*>
>> &KillOps) {
>> + std::vector<MachineOperand*> &KillOps,
>> + VirtRegMap &VRM) {
>> SmallSet<unsigned, 8> Rejected;
>> return GetRegForReload(PhysReg, MI, Spills,
>> MaybeDeadStores, Rejected,
>> - RegKills, KillOps);
>> + RegKills, KillOps, VRM);
>> }
>> };
>> }
>> @@ -651,8 +663,7 @@
>>
>> /// rewriteMBB - Keep track of which spills are available even
>> after the
>> /// register allocator is done with them. If possible, avoid
>> reloading vregs.
>> -void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap
>> &VRM,
>> - std::vector<MachineInstr*>
>> &ReMatedMIs) {
>> +void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap
>> &VRM) {
>> DOUT << MBB.getBasicBlock()->getName() << ":\n";
>>
>> // Spills - Keep track of which spilled values are available in
>> physregs so
>> @@ -689,28 +700,6 @@
>> // Loop over all of the implicit defs, clearing them from our
>> available
>> // sets.
>> const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
>> -
>> - // If this instruction is being rematerialized, just remove it!
>> - int FrameIdx;
>> - if (TII->isTriviallyReMaterializable(&MI) ||
>> - TII->isLoadFromStackSlot(&MI, FrameIdx)) {
>> - Erased = true;
>> - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
>> - MachineOperand &MO = MI.getOperand(i);
>> - if (!MO.isRegister() || MO.getReg() == 0)
>> - continue; // Ignore non-register operands.
>> - if (MO.isDef() && !VRM.isReMaterialized(MO.getReg())) {
>> - Erased = false;
>> - break;
>> - }
>> - }
>> - if (Erased) {
>> - VRM.RemoveFromFoldedVirtMap(&MI);
>> - ReMatedMIs.push_back(MI.removeFromParent());
>> - goto ProcessNextInst;
>> - }
>> - }
>> -
>> if (TID->ImplicitDefs) {
>> const unsigned *ImpDef = TID->ImplicitDefs;
>> for ( ; *ImpDef; ++ImpDef) {
>> @@ -738,7 +727,7 @@
>> "Not a virtual or a physical register?");
>>
>> unsigned VirtReg = MO.getReg();
>> - if (!VRM.hasStackSlot(VirtReg)) {
>> + if (VRM.isAssignedReg(VirtReg)) {
>> // This virtual register was assigned a physreg!
>> unsigned Phys = VRM.getPhys(VirtReg);
>> MF.setPhysRegUsed(Phys);
>> @@ -752,12 +741,13 @@
>> if (!MO.isUse())
>> continue; // Handle defs in the loop below (handle
>> use&def here though)
>>
>> - bool doReMat = VRM.isReMaterialized(VirtReg);
>> - int StackSlot = VRM.getStackSlot(VirtReg);
>> + bool DoReMat = VRM.isReMaterialized(VirtReg);
>> + int SSorRMId = DoReMat
>> + ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
>> unsigned PhysReg;
>>
>> // Check to see if this stack slot is available.
>> - if ((PhysReg = Spills.getSpillSlotPhysReg(StackSlot))) {
>> + if ((PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId))) {
>> // This spilled operand might be part of a two-address
>> operand. If this
>> // is the case, then changing it will necessarily require
>> changing the
>> // def part of the instruction as well. However, in some
>> cases, we
>> @@ -771,16 +761,16 @@
>> // Okay, we have a two address operand. We can reuse
>> this physreg as
>> // long as we are allowed to clobber the value and
>> there isn't an
>> // earlier def that has already clobbered the physreg.
>> - CanReuse = Spills.canClobberPhysReg(StackSlot) &&
>> + CanReuse = Spills.canClobberPhysReg(SSorRMId) &&
>> !ReusedOperands.isClobbered(PhysReg);
>> }
>>
>> if (CanReuse) {
>> // If this stack slot value is already available, reuse
>> it!
>> - if (StackSlot > VirtRegMap::MAX_STACK_SLOT)
>> - DOUT << "Reusing RM#" << StackSlot-
>> VirtRegMap::MAX_STACK_SLOT-1;
>> + if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
>> + DOUT << "Reusing RM#" << SSorRMId-
>> VirtRegMap::MAX_STACK_SLOT-1;
>> else
>> - DOUT << "Reusing SS#" << StackSlot;
>> + DOUT << "Reusing SS#" << SSorRMId;
>> DOUT << " from physreg "
>> << MRI->getName(PhysReg) << " for vreg"
>> << VirtReg <<" instead of reloading into physreg "
>> @@ -801,7 +791,7 @@
>> // or R0 and R1 might not be compatible with each
>> other. In this
>> // case, we actually insert a reload for V1 in R1,
>> ensuring that
>> // we can get at R0 or its alias.
>> - ReusedOperands.addReuse(i, StackSlot, PhysReg,
>> + ReusedOperands.addReuse(i, SSorRMId, PhysReg,
>> VRM.getPhys(VirtReg), VirtReg);
>> if (ti != -1)
>> // Only mark it clobbered if this is a use&def operand.
>> @@ -829,16 +819,16 @@
>> // reuser.
>> if (ReusedOperands.hasReuses())
>> DesignatedReg = ReusedOperands.GetRegForReload
>> (DesignatedReg, &MI,
>> - Spills, MaybeDeadStores,
>> RegKills, KillOps);
>> + Spills, MaybeDeadStores, RegKills,
>> KillOps, VRM);
>>
>> // If the mapped designated register is actually the
>> physreg we have
>> // incoming, we don't need to inserted a dead copy.
>> if (DesignatedReg == PhysReg) {
>> // If this stack slot value is already available, reuse
>> it!
>> - if (StackSlot > VirtRegMap::MAX_STACK_SLOT)
>> - DOUT << "Reusing RM#" << StackSlot-
>> VirtRegMap::MAX_STACK_SLOT-1;
>> + if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
>> + DOUT << "Reusing RM#" << SSorRMId-
>> VirtRegMap::MAX_STACK_SLOT-1;
>> else
>> - DOUT << "Reusing SS#" << StackSlot;
>> + DOUT << "Reusing SS#" << SSorRMId;
>> DOUT << " from physreg " << MRI->getName(PhysReg) << "
>> for vreg"
>> << VirtReg
>> << " instead of reloading into same physreg.\n";
>> @@ -859,7 +849,7 @@
>> // This invalidates DesignatedReg.
>> Spills.ClobberPhysReg(DesignatedReg);
>>
>> - Spills.addAvailable(StackSlot, &MI, DesignatedReg);
>> + Spills.addAvailable(SSorRMId, &MI, DesignatedReg);
>> MI.getOperand(i).setReg(DesignatedReg);
>> DOUT << '\t' << *prior(MII);
>> ++NumReused;
>> @@ -877,24 +867,24 @@
>> // reuser.
>> if (ReusedOperands.hasReuses())
>> PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
>> - Spills, MaybeDeadStores,
>> RegKills, KillOps);
>> + Spills, MaybeDeadStores, RegKills,
>> KillOps, VRM);
>>
>> MF.setPhysRegUsed(PhysReg);
>> ReusedOperands.markClobbered(PhysReg);
>> - if (doReMat) {
>> + if (DoReMat) {
>> MRI->reMaterialize(MBB, &MI, PhysReg,
>> VRM.getReMaterializedMI(VirtReg));
>> ++NumReMats;
>> } else {
>> - MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
>> + MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
>> ++NumLoads;
>> }
>> // This invalidates PhysReg.
>> Spills.ClobberPhysReg(PhysReg);
>>
>> // Any stores to this stack slot are not dead anymore.
>> - if (!doReMat)
>> - MaybeDeadStores.erase(StackSlot);
>> - Spills.addAvailable(StackSlot, &MI, PhysReg);
>> + if (!DoReMat)
>> + MaybeDeadStores.erase(SSorRMId);
>> + Spills.addAvailable(SSorRMId, &MI, PhysReg);
>> // Assumes this is the last use. IsKill will be unset if
>> reg is reused
>> // unless it's a two-address operand.
>> if (TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
>> @@ -914,7 +904,7 @@
>> << I->second.second;
>> unsigned VirtReg = I->second.first;
>> VirtRegMap::ModRef MR = I->second.second;
>> - if (!VRM.hasStackSlot(VirtReg)) {
>> + if (VRM.isAssignedReg(VirtReg)) {
>> DOUT << ": No stack slot!\n";
>> continue;
>> }
>> @@ -929,7 +919,7 @@
>> if (FrameIdx == SS) {
>> // If this spill slot is available, turn it into a
>> copy (or nothing)
>> // instead of leaving it as a load!
>> - if (unsigned InReg = Spills.getSpillSlotPhysReg(SS)) {
>> + if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg
>> (SS)) {
>> DOUT << "Promoted Load To Copy: " << MI;
>> if (DestReg != InReg) {
>> MRI->copyRegToReg(MBB, &MI, DestReg, InReg,
>> @@ -974,7 +964,7 @@
>> // the value, the value is not available anymore.
>> if (MR & VirtRegMap::isMod) {
>> // Notice that the value in this stack slot has been
>> modified.
>> - Spills.ModifyStackSlot(SS);
>> + Spills.ModifyStackSlotOrReMat(SS);
>>
>> // If this is *just* a mod of the value, check to see if
>> this is just a
>> // store to the spill slot (i.e. the spill got merged
>> into the copy). If
>> @@ -1053,7 +1043,7 @@
>> // Another def has taken the assigned physreg. It
>> must have been a
>> // use&def which got it due to reuse. Undo the reuse!
>> PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
>> - Spills, MaybeDeadStores,
>> RegKills, KillOps);
>> + Spills, MaybeDeadStores, RegKills,
>> KillOps, VRM);
>> }
>> }
>>
>> @@ -1077,7 +1067,7 @@
>> // If the stack slot value was previously available in
>> some other
>> // register, change it now. Otherwise, make the register
>> available,
>> // in PhysReg.
>> - Spills.ModifyStackSlot(StackSlot);
>> + Spills.ModifyStackSlotOrReMat(StackSlot);
>> Spills.ClobberPhysReg(PhysReg);
>> Spills.addAvailable(StackSlot, LastStore, PhysReg);
>> ++NumStores;
>>
>> Modified: llvm/trunk/lib/CodeGen/VirtRegMap.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/
>> VirtRegMap.h?rev=41060&r1=41059&r2=41060&view=diff
>>
>> =====================================================================
>> =========
>> --- llvm/trunk/lib/CodeGen/VirtRegMap.h (original)
>> +++ llvm/trunk/lib/CodeGen/VirtRegMap.h Mon Aug 13 18:45:17 2007
>> @@ -55,6 +55,7 @@
>> /// which corresponds to the stack slot this register is spilled
>> /// at.
>> IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
>> + IndexedMap<int, VirtReg2IndexFunctor> Virt2ReMatIdMap;
>> /// MI2VirtMap - This is MachineInstr to virtual register
>> /// mapping. In the case of memory spill code being folded into
>> /// instructions, we need to know which virtual register was
>> @@ -64,7 +65,7 @@
>> /// ReMatMap - This is virtual register to re-materialized
>> instruction
>> /// mapping. Each virtual register whose definition is going
>> to be
>> /// re-materialized has an entry in it.
>> - std::map<unsigned, const MachineInstr*> ReMatMap;
>> + IndexedMap<MachineInstr*, VirtReg2IndexFunctor> ReMatMap;
>>
>> /// ReMatId - Instead of assigning a stack slot to a to be
>> rematerialized
>> /// virtual register, an unique id is being assigned. This
>> keeps track of
>> @@ -119,10 +120,11 @@
>> grow();
>> }
>>
>> - /// @brief returns true is the specified virtual register is
>> - /// mapped to a stack slot
>> - bool hasStackSlot(unsigned virtReg) const {
>> - return getStackSlot(virtReg) != NO_STACK_SLOT;
>> + /// @brief returns true if the specified virtual register is not
>> + /// mapped to a stack slot or rematerialized.
>> + bool isAssignedReg(unsigned virtReg) const {
>> + return getStackSlot(virtReg) == NO_STACK_SLOT &&
>> + getReMatId(virtReg) == NO_STACK_SLOT;
>> }
>>
>> /// @brief returns the stack slot mapped to the specified
>> virtual
>> @@ -132,6 +134,13 @@
>> return Virt2StackSlotMap[virtReg];
>> }
>>
>> + /// @brief returns the rematerialization id mapped to the
>> specified virtual
>> + /// register
>> + int getReMatId(unsigned virtReg) const {
>> + assert(MRegisterInfo::isVirtualRegister(virtReg));
>> + return Virt2ReMatIdMap[virtReg];
>> + }
>> +
>> /// @brief create a mapping for the specifed virtual register to
>> /// the next available stack slot
>> int assignVirt2StackSlot(unsigned virtReg);
>> @@ -142,22 +151,26 @@
>> /// @brief assign an unique re-materialization id to the
>> specified
>> /// virtual register.
>> int assignVirtReMatId(unsigned virtReg);
>> + /// @brief assign an unique re-materialization id to the
>> specified
>> + /// virtual register.
>> + void assignVirtReMatId(unsigned virtReg, int id);
>>
>> /// @brief returns true if the specified virtual register is
>> being
>> /// re-materialized.
>> bool isReMaterialized(unsigned virtReg) const {
>> - return ReMatMap.count(virtReg) != 0;
>> + return ReMatMap[virtReg] != NULL;
>> }
>>
>> /// @brief returns the original machine instruction being re-
>> issued
>> /// to re-materialize the specified virtual register.
>> - const MachineInstr *getReMaterializedMI(unsigned virtReg) {
>> + MachineInstr *getReMaterializedMI(unsigned virtReg) const {
>> return ReMatMap[virtReg];
>> }
>>
>> /// @brief records the specified virtual register will be
>> /// re-materialized and the original instruction which will
>> be re-issed
>> - /// for this purpose.
>> + /// for this purpose. If parameter all is true, then all uses of the
>> + /// registers are rematerialized and it's safe to delete the
>> + /// definition.
>> void setVirtIsReMaterialized(unsigned virtReg, MachineInstr
>> *def) {
>> ReMatMap[virtReg] = def;
>> }
>>
>>
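Tying the VirtRegMap changes together: stack slots stay small non-negative integers, while rematerialization ids are handed out starting above MAX_STACK_SLOT, so a single int (the SSorRMId used in the spiller) can carry either and a comparison against MAX_STACK_SLOT tells them apart. A standalone sketch of that numbering convention; the bound below is illustrative, not necessarily LLVM's actual constant:

#include <cassert>

const int MAX_STACK_SLOT = (1 << 18) - 1; // illustrative bound for this sketch

class ToySlotOrReMat {
  int NextReMatId;
public:
  ToySlotOrReMat() : NextReMatId(MAX_STACK_SLOT + 1) {}
  // Remat ids start just past the stack slot range, mirroring
  // ReMatId(MAX_STACK_SLOT+1) in the VirtRegMap constructor above.
  int assignReMatId() { return NextReMatId++; }
  static bool isReMatId(int SlotOrReMat) { return SlotOrReMat > MAX_STACK_SLOT; }
};

int main() {
  ToySlotOrReMat VRM;
  int Id = VRM.assignReMatId();
  assert(ToySlotOrReMat::isReMatId(Id));
  assert(!ToySlotOrReMat::isReMatId(5)); // an ordinary stack slot number
  return 0;
}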
>>
>> <bugpoint-reduced-simplified.bc>