[llvm-commits] CVS: llvm/lib/Reoptimizer/Inst/lib/InstManip.h PhaseInfo.h Phases.cpp SparcInstManip.cpp SparcInstManip.h
Joel Stanley
jstanley at cs.uiuc.edu
Wed May 7 21:41:01 PDT 2003
Changes in directory llvm/lib/Reoptimizer/Inst/lib:
InstManip.h updated: 1.14 -> 1.15
PhaseInfo.h updated: 1.1 -> 1.2
Phases.cpp updated: 1.23 -> 1.24
SparcInstManip.cpp updated: 1.3 -> 1.4
SparcInstManip.h updated: 1.4 -> 1.5
---
Log message:
Cleaned up class design. Phase 5 slot is now being generated and branched to.
---
Diffs of the changes:
Index: llvm/lib/Reoptimizer/Inst/lib/InstManip.h
diff -u llvm/lib/Reoptimizer/Inst/lib/InstManip.h:1.14 llvm/lib/Reoptimizer/Inst/lib/InstManip.h:1.15
--- llvm/lib/Reoptimizer/Inst/lib/InstManip.h:1.14 Wed Apr 30 14:21:06 2003
+++ llvm/lib/Reoptimizer/Inst/lib/InstManip.h Wed May 7 20:50:06 2003
@@ -29,9 +29,11 @@
class TraceCache;
class Phase2;
class Phase3;
+class Phase4;
class InstCandidate;
class Phase3Info;
class Phase4Info;
+struct PrimInfo;
class InstManip
{
@@ -58,14 +60,24 @@
// in this manner because there are not really enough phases to warrant building the
// slot-building behavior into the Phase{3,4}Info classes themselves.
+ // For the phase 3 slot
virtual void buildSlot(Phase3Info* p3info,
std::vector<unsigned>& snippet) = 0;
+ // For the phase 4 slot
virtual void buildSlot(Phase4Info* p4info,
std::vector<unsigned>& snippet) = 0;
+ // For the phase 5 slot
+ virtual void buildSlot(PrimInfo* pi,
+ uint64_t slotBase,
+ uint64_t instAddr,
+ uint64_t startAddr,
+ std::vector<unsigned>& snippet) = 0;
+
virtual unsigned getSlotSize(Phase2* p2) const = 0;
virtual unsigned getSlotSize(Phase3* p3, InstCandidate& cand) const = 0;
+ virtual unsigned getSlotSize(Phase4* p4) const = 0;
// findCandidates - Build the vector of instruction candidates that occur in the
// region defined by the given addresses. This is necessarily a platform-dependent
Index: llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h
diff -u llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h:1.1 llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h:1.2
--- llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h:1.1 Wed Apr 30 16:28:07 2003
+++ llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h Wed May 7 20:50:06 2003
@@ -15,6 +15,12 @@
typedef std::pair<uint64_t, uint64_t> AddressRange;
+typedef struct PrimInfo {
+ unsigned gbtType;
+ unsigned short* loadVar;
+ unsigned gbtStartIdx;
+};
+
class Phase3Info
{
public:
@@ -70,11 +76,13 @@
{
public:
Phase4Info(const InstCandidate& candidate,
+ uint64_t startAddr,
uint64_t slotDescriptor,
uint64_t slotSize,
TraceCache* pTC,
InstManip* pIM):
m_candidate(candidate),
+ m_startAddr(startAddr),
m_slotDescriptor(slotDescriptor),
m_slotSize(slotSize),
m_pTC(pTC),
@@ -83,6 +91,7 @@
}
const InstCandidate& getCandidate() const { return m_candidate; }
+ uint64_t getStartAddr() const { return m_startAddr; }
uint64_t getSlot() const { return m_slotDescriptor; }
uint64_t getSlotSize() const { return m_slotSize; }
TraceCache* getTraceCache() { return m_pTC; }
@@ -92,11 +101,11 @@
Phase4Info() {}
InstCandidate m_candidate; // Candidate responsible for this instance's creation
+ uint64_t m_startAddr; // Start address of enclosing function
uint64_t m_slotDescriptor; // Slot created by phase 3
unsigned m_slotSize; // Size of slot created by phase 3
TraceCache* m_pTC; // TraceCache instance used by phases 2 and 3
InstManip* m_pIM; // The InstManip instance to pass to the next phase
};
-
#endif // _INCLUDED_PHASEINFO_H
Index: llvm/lib/Reoptimizer/Inst/lib/Phases.cpp
diff -u llvm/lib/Reoptimizer/Inst/lib/Phases.cpp:1.23 llvm/lib/Reoptimizer/Inst/lib/Phases.cpp:1.24
--- llvm/lib/Reoptimizer/Inst/lib/Phases.cpp:1.23 Sun May 4 16:16:17 2003
+++ llvm/lib/Reoptimizer/Inst/lib/Phases.cpp Wed May 7 20:50:06 2003
@@ -66,6 +66,14 @@
#include "PhaseInfo.h"
#include "SparcInstManip.h"
+#define DEBUG 1
+
+#if DEBUG
+#define DEBUG_MSG(x) std::cerr << x << std::endl
+#else
+#define DEBUG_MSG(x)
+#endif
+
using std::vector;
using std::cerr;
using std::endl;
@@ -78,12 +86,6 @@
// obtained in the same manner.
extern unsigned ppGBTSize;
-typedef struct PrimInfo {
- unsigned gbtType;
- unsigned short* loadVar;
- unsigned gbtStartIdx;
-};
-
extern PrimInfo ppGBT[];
typedef std::pair<uint64_t, uint64_t> AddressRange;
@@ -167,10 +169,10 @@
void Phase2::transform()
{
- cerr << "============================== Begin Phase 2 ==============================\n";
+ DEBUG_MSG("============================== Begin Phase 2 ==============================");
const char* execName = getexecname();
- cerr << "Executable name is: " << execName << endl;
+ DEBUG_MSG("Executable name is: " << execName);
ElfReader elfReader(execName);
@@ -185,7 +187,7 @@
while(elfReader.findNextSymbol(funcName, range, m_pIM->getInstWidth()))
funcs.push_back(std::make_pair(funcName, range));
- cerr << "There are " << funcs.size() << " functions to process." << endl << endl;
+ DEBUG_MSG("There are " << funcs.size() << " functions to process." << endl);
m_pIM->makePhase3SpillRegion(funcs.size());
@@ -197,13 +199,13 @@
//cerr << i->first << " is to be transformed" << endl;
if(i->first == "fibs") {
- cerr << "Transforming function " << i->first << "..." << endl;
+ DEBUG_MSG("Transforming function " << i->first << "...");
transformFunction(i->second);
}
}
}
- cerr << "============================== End Phase 2 ==============================\n";
+ DEBUG_MSG("============================== End Phase 2 ===========================");
}
@@ -234,6 +236,14 @@
return slotBase;
}
+static void dumpSnippet(vector<unsigned>& snippet, InstManip* im)
+{
+ for(vector<unsigned>::iterator j = snippet.begin(), k = snippet.end(); j != k; ++j) {
+ im->printInst(*j);
+ cerr << endl;
+ }
+}
+
void Phase2::transformFunction(AddressRange& range)
{
// Obtain address of first replacable instruction in function and obtain a new slot
@@ -260,12 +270,10 @@
vector<unsigned> snippet;
m_pIM->buildSlot(p3info, snippet);
- // Dump snippet instructions:
- cerr << "phase3 slot instructions:" << endl;
- for(vector<unsigned>::iterator j = snippet.begin(), k = snippet.end(); j != k; ++j) {
- m_pIM->printInst(*j);
- cerr << endl;
- }
+#if DEBUG
+ DEBUG_MSG("phase3 slot instructions:");
+ dumpSnippet(snippet, m_pIM);
+#endif
// Copy the snippet code into the slot
copySnippetToSlot(snippet, slotBase, vm, m_pIM);
@@ -284,9 +292,9 @@
m_pTC(p3info->getTraceCache()),
m_pIM(p3info->getIM())
{
- cerr << "================ Begin Phase 3 [" << std::hex
- << m_pPhase3Info->getStartAddr() << ", " << m_pPhase3Info->getEndAddr()
- << "] ================\n";
+ DEBUG_MSG("================ Begin Phase 3 [" << std::hex
+ << m_pPhase3Info->getStartAddr() << ", " << m_pPhase3Info->getEndAddr()
+ << "] ================");
// 1. Replace the original (replaced) instruction at the proper location in the
// original code (thus effectively removing the branch to the slot created by phase 2
@@ -318,8 +326,10 @@
// For each load candidate, obtain a new slot and write the phase 4 slot region
// contents into it.
+ DEBUG_MSG("There are " << candidates.size() << " candidates to process");
+
for(vector<InstCandidate>::iterator i = candidates.begin(), e = candidates.end(); i != e; ++i) {
- cerr << "Transforming " << *i << endl;
+ DEBUG_MSG("Transforming " << *i);
unsigned slotSize = m_pIM->getSlotSize(this, *i);
// Replace load candidate instruction with a branch to the start of a new slot.
@@ -328,17 +338,16 @@
// Build the Phase4Info structure and generate the phase 4 slot.
- Phase4Info* p4info = new Phase4Info(*i, slotBase, slotSize, m_pTC, m_pIM);
+ Phase4Info* p4info = new Phase4Info(*i, m_pPhase3Info->getStartAddr(),
+ slotBase, slotSize, m_pTC, m_pIM);
vector<unsigned> snippet;
m_pIM->buildSlot(p4info, snippet);
- // Dump snippet instructions:
- cerr << "phase4 slot instructions:" << endl;
- for(vector<unsigned>::iterator j = snippet.begin(), k = snippet.end(); j != k; ++j) {
- m_pIM->printInst(*j);
- cerr << endl;
- }
+#if DEBUG
+ DEBUG_MSG("phase4 slot instructions:");
+ dumpSnippet(snippet, m_pIM);
+#endif
// Copy the snippet code into the slot
copySnippetToSlot(snippet, slotBase, m_pTC->getVM(), m_pIM);
@@ -358,14 +367,13 @@
// ...and process them
processCandidates(candidates);
- cerr << "============================== End Phase 3 ==============================\n";
+ DEBUG_MSG("============================== End Phase 3 ==============================");
}
//////////////// Phase4 implementation ////////////////
void phase4(uint64_t tag, Phase4Info* p4info)
{
- cerr << "phase 4 fcn, tag is " << tag << endl;
Phase4 p4(tag, p4info);
p4.transform();
}
@@ -376,8 +384,7 @@
m_pIM(p4info->getIM()),
m_tag(tag)
{
- cerr << "phase4 ctor: tag is " << tag << endl;
- cerr << "================ Begin Phase 4 ================\n";
+ DEBUG_MSG("================ Begin Phase 4 ================");
}
Phase4::~Phase4()
@@ -420,39 +427,47 @@
return 0;
}
-void fakeInstFunc(double* param)
-{
- cerr << "I AM AN INSTRUMENTATION FUNCTION, FEAR ME!" << endl;
- *param = 3.14;
-}
-
void Phase4::transform()
{
- cerr << "tag is " << m_tag << endl;
+
+#if DEBUG
+ DEBUG_MSG("tag is " << m_tag);
dumpGBT(cerr);
+#endif
if(PrimInfo* pi = searchGBT(m_tag)) {
- cerr << "Tag matches." << endl;
+ DEBUG_MSG("Tag matches.");
const InstCandidate& cand = m_pPhase4Info->getCandidate();
-#if 0
- // Make a new slot that calls the instrumentation function, inserting a branch to
- // it over the original code.
-
- uint64_t slotBase = replaceInstWithBrToSlot(cand.front().first, getSlotSize(),
- m_pTC, m_instManip);
-#endif
+ assert(cand.getInsts().size() >= 2
+ && "Unexpected number of instructions in candidate");
// Write NOPs over the original instructions that were associated with the elected
- // candidate, but leave the branch instruction intact.
+ // candidate. No need to no-op over the candidate load instruction itself since
+ // we're about to write over it with a branch to the phase 5 slot.
VirtualMem* vm = m_pTC->getVM();
for(vector<std::pair<uint64_t, unsigned> >::const_iterator i = cand.getInsts().begin() + 1,
e = cand.getInsts().end(); i != e; ++i)
vm->writeInstToVM(i->first, m_pIM->getNOP());
- // Write the instructions to call the instrumentation function
+ // Obtain memory (& rewrite branch) to the phase 5 slot.
+
+ unsigned slotSize = m_pIM->getSlotSize(this);
+ uint64_t repAddr = cand.front().first;
+ uint64_t slotBase = replaceInstWithBrToSlot(repAddr, slotSize, m_pTC, m_pIM);
+ vector<unsigned> snippet;
+ m_pIM->buildSlot(pi, slotBase, repAddr, m_pPhase4Info->getStartAddr(), snippet);
+
+#if DEBUG
+ DEBUG_MSG("phase 5 slot contents: ");
+ dumpSnippet(snippet, m_pIM);
+#endif
+
+ copySnippetToSlot(snippet, slotBase, m_pTC->getVM(), m_pIM);
+
+#if 0
void* instFuncVP = (void*) fakeInstFunc; // From the GBT eventually
void (*instFunc)(void*) = (void (*)(void*)) instFuncVP;
@@ -460,9 +475,11 @@
instFunc(mem);
printf("%f\n", *((double*) mem));
free(mem);
+
+#endif
}
else {
- cerr << "Could not find tag" << endl;
+ DEBUG_MSG("Could not find tag");
// The candidate failed to get elected, so pack up and go home. Restore the
// replaced instruction (i.e. the branch that invoked this code) with the original
// instruction at that location.
@@ -472,11 +489,20 @@
m_pPhase4Info->getCandidate().front().second);
}
+#if 0
// (TEMP) For now, restore the candidate load to its original position for debugging
// purposes.
m_pPhase4Info->getTraceCache()->getVM()->writeInstToVM(m_pPhase4Info->getCandidate().front().first,
m_pPhase4Info->getCandidate().front().second);
+#endif
+
+ DEBUG_MSG("================ End Phase 4 ================");
+}
- cerr << "================ End Phase 4 ================\n";
+//////////////// Phase 5 implementation ////////////////
+
+void phase5(PrimInfo* pi)
+{
+ DEBUG_MSG("phase5 function invoked");
}
Index: llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp
diff -u llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp:1.3 llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp:1.4
--- llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp:1.3 Wed Apr 30 12:31:47 2003
+++ llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp Wed May 7 20:50:06 2003
@@ -47,10 +47,10 @@
#include "SparcInstManip.h"
#include "PhaseInfo.h"
+// These are exported due to some inline methods in SparcInstManip.h
+uint64_t SparcInstManip::sm_phase4SpillRegion[SparcInstManip::SHARED_SIZE];
const unsigned SparcInstManip::BRANCH_ALWAYS_BASE = 0x10480000;
const unsigned SparcInstManip::BRANCH_ALWAYS_BASE_ANNUL = 0x30480000;
-const unsigned SparcInstManip::BIAS = 2047;
-uint64_t SparcInstManip::sm_phase4SpillRegion[SparcInstManip::SHARED_SIZE];
using std::cout;
using std::cerr;
@@ -59,6 +59,7 @@
void phase3(Phase3Info* p3info);
void phase4(uint64_t tag, Phase4Info* p4info);
+void phase5(PrimInfo* pi);
SparcInstManip::SparcInstManip(TraceCache* tc):
InstManip(tc, SHARED_SIZE, INST_WIDTH, NOP_INST),
@@ -149,6 +150,35 @@
"Snippet size does not match expected slot size");
}
+void SparcInstManip::buildSlot(PrimInfo* pi,
+ uint64_t slotBase,
+ uint64_t instAddr,
+ uint64_t startAddr,
+ std::vector<unsigned>& snippet)
+{
+ // Before we generate code to spill the shared registers, we must first search
+ // backwards from the instrumentation site (i.e., the branch to the slot, formerly the
+ // load-volatile) to discover a marker alloca that will tell us the correct offset in
+ // the current stack frame. startAddr is the starting address of the function
+ // containing the instrumentation site. We will search backwards from instAddr for a
+ // valid alloca marker, stopping at startAddr if we do not encounter one.
+
+ unsigned offset = findAllocaOffset(instAddr, startAddr);
+
+ startCode(snippet);
+
+ generateSave();
+ generateCall((uint64_t) &phase5, slotBase);
+
+ // We need to branch back to one instruction beyond instruction that branches to the
+ // phase 5 slot.
+ generateBranchAlways(instAddr + getInstWidth(), slotBase, getRestoreInst());
+
+ endCode();
+
+    // TODO: Add assert against the snippet.
+}
+
unsigned SparcInstManip::getSlotSize(Phase2* p2) const
{
// The following sum corresponds to the sizes consumed by the various regions of the
@@ -157,12 +187,12 @@
(void) p2;
- return getGenSaveSize() +
- getGenSpillSharedSize() +
- getGenLoadSize() +
- getGenCallSize() +
- getGenRestoreSharedSize() +
- getGenBranchAlwaysSize();
+ return GEN_SAVE_SIZE +
+ GEN_SPL_SHARED_SIZE +
+ GEN_LOAD_SIZE +
+ GEN_CALL_SIZE +
+ GEN_RESTR_SHARED_SIZE +
+ GEN_BRANCH_ALWAYS_SIZE;
}
unsigned SparcInstManip::getSlotSize(Phase3* p3, InstCandidate& cand) const
@@ -173,15 +203,26 @@
(void) p3;
- return getGenSaveSize() +
+ return GEN_SAVE_SIZE +
getGenAddressCopySize(cand.front().second) +
- getGenParamStoreSize() +
- getGenSpillSharedSize() +
- getGenLoadSize() +
- getGenParamStoreSize() +
- getGenCallSize() +
- getGenRestoreSharedSize() +
- getGenBranchAlwaysSize();
+ GEN_PSTORE_SIZE +
+ GEN_SPL_SHARED_SIZE +
+ GEN_LOAD_SIZE +
+ GEN_PSTORE_SIZE +
+ GEN_CALL_SIZE +
+ GEN_RESTR_SHARED_SIZE +
+ GEN_BRANCH_ALWAYS_SIZE;
+}
+
+unsigned SparcInstManip::getSlotSize(Phase4* p4) const
+{
+ // The following sum corresponds to the sizes consumed by the various regions of the
+ // the slot constructed by phase 4, called the phase 5 slot. See ASCII diagram of
+ // phase 5 slot contents for details.
+
+ (void) p4;
+
+ return GEN_SAVE_SIZE + GEN_CALL_SIZE + GEN_BRANCH_ALWAYS_SIZE;
}
void SparcInstManip::findCandidates(uint64_t start,
@@ -233,8 +274,8 @@
}
void SparcInstManip::generateLoad(uint64_t value,
- LogicalRegister dest,
- LogicalRegister tmp)
+ LogicalRegister dest,
+ LogicalRegister tmp)
{
// When reg == REG_0, load the 64-bit value into %o0, using %o0 and %o1.
// When reg == REG_1, load the 64-bit value into %o1, using %o1 and %o2.
@@ -266,13 +307,13 @@
// add %destReg, (lwr 10b of lwr wrd), %destReg
snippet.push_back(MK_ADD_R_I(destReg, destReg, LOW10(LOWWORD(value))));
- assert(snippet.size() - initSize == getGenLoadSize() &&
+ assert(snippet.size() - initSize == GEN_LOAD_SIZE &&
"Unexpected number of instructions in code sequence for 64-bit value -> %dest");
}
void SparcInstManip::generateAddressCopy(unsigned loadInst,
- LogicalRegister dest,
- bool afterSave)
+ LogicalRegister dest,
+ bool afterSave)
{
// NB: After save instruction has been issued, the output registers are mapped to the
// input registers.
@@ -317,7 +358,7 @@
snippet.push_back(MK_STX_STACK(srcReg, BIAS + off));
- assert(snippet.size() - initSize == getGenParamStoreSize() &&
+ assert(snippet.size() - initSize == GEN_PSTORE_SIZE &&
"Unexpected number of instructions in code sequence for parameter store");
}
@@ -336,7 +377,7 @@
snippet.push_back(getCallInst(dest, callInstAddr));
snippet.push_back(getNOP());
- assert(snippet.size() - initSize == getGenCallSize() &&
+ assert(snippet.size() - initSize == GEN_CALL_SIZE &&
"Unexpected number of instructions in code sequence for call");
}
@@ -355,7 +396,7 @@
snippet.push_back(getRestoreInst());
- assert(snippet.size() - initSize == getGenRestoreSize() &&
+ assert(snippet.size() - initSize == GEN_RESTORE_SIZE &&
"Unexpected number of instructions in code sequence for restore");
}
@@ -369,7 +410,7 @@
// save %sp, -176, %sp
snippet.push_back(MK_SAVE_IMM(R_O6, R_O6, -176));
- assert(snippet.size() - initSize == getGenSaveSize() &&
+ assert(snippet.size() - initSize == GEN_SAVE_SIZE &&
"Unexpected number of instructions in code sequence for save");
}
@@ -377,8 +418,8 @@
// shared registers instead of dumping all of the code into the current snippet.
void SparcInstManip::generateRestoreShared(uint64_t restoreFromAddr,
- LogicalRegister tmp1,
- LogicalRegister tmp2)
+ LogicalRegister tmp1,
+ LogicalRegister tmp2)
{
assert(m_pCurrSnippet && "Invalid snippet for code generation");
assert(tmp1 != tmp2 && "Distinct logical registers required");
@@ -396,13 +437,13 @@
snippet.push_back(MK_LOAD_IMM(R_G6, tmpReg, 48));
snippet.push_back(MK_LOAD_IMM(R_G7, tmpReg, 56));
- assert(snippet.size() - initSize == getGenRestoreSharedSize() &&
+ assert(snippet.size() - initSize == GEN_RESTR_SHARED_SIZE &&
"Unexpected number of instructions in code sequence for restore shared");
}
void SparcInstManip::generateSpillShared(uint64_t spillToAddr,
- LogicalRegister tmp1,
- LogicalRegister tmp2)
+ LogicalRegister tmp1,
+ LogicalRegister tmp2)
{
assert(m_pCurrSnippet && "Invalid snippet for code generation");
assert(tmp1 != tmp2 && "Distinct logical registers required");
@@ -420,13 +461,13 @@
snippet.push_back(MK_STORE_IMM(R_G6, tmpReg, 48));
snippet.push_back(MK_STORE_IMM(R_G7, tmpReg, 56));
- assert(snippet.size() - initSize == getGenSpillSharedSize() &&
+ assert(snippet.size() - initSize == GEN_SPL_SHARED_SIZE &&
"Unexpected number of instructions in code sequence for spill shared");
}
void SparcInstManip::generateBranchAlways(uint64_t dest,
- uint64_t slotBase,
- unsigned delaySlotInstr)
+ uint64_t slotBase,
+ unsigned delaySlotInstr)
{
assert(m_pCurrSnippet && "Invalid snippet for code generation");
vector<unsigned>& snippet = *m_pCurrSnippet;
@@ -440,7 +481,7 @@
snippet.push_back(getBranchAlways(dest, branchInstAddr, false)); // annul bit low
snippet.push_back(delaySlotInstr);
- assert(snippet.size() - initSize == getGenBranchAlwaysSize() &&
+ assert(snippet.size() - initSize == GEN_BRANCH_ALWAYS_SIZE &&
"Unexpected number of instruction in code sequence for branch-always");
}
@@ -646,5 +687,10 @@
return currAddr;
}
+ return 0;
+}
+
+unsigned SparcInstManip::findAllocaOffset(uint64_t instAddr, uint64_t startAddr)
+{
return 0;
}
Index: llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h
diff -u llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h:1.4 llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h:1.5
--- llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h:1.4 Wed Apr 30 12:37:10 2003
+++ llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h Wed May 7 20:50:06 2003
@@ -31,8 +31,15 @@
virtual void buildSlot(Phase4Info* p3info,
std::vector<unsigned>& snippet);
+ virtual void buildSlot(PrimInfo* pi,
+ uint64_t slotBase,
+ uint64_t instAddr,
+ uint64_t startAddr,
+ std::vector<unsigned>& snippet);
+
virtual unsigned getSlotSize(Phase2* p2) const;
virtual unsigned getSlotSize(Phase3* p3, InstCandidate& cand) const;
+ virtual unsigned getSlotSize(Phase4* p4) const;
virtual void findCandidates(uint64_t start,
uint64_t end,
@@ -80,19 +87,6 @@
unsigned getRestoreInst() const;
inline unsigned getCallInst(uint64_t dest, uint64_t pc) const;
-
- // These need to become class constants where possible since this portion of the
- // interface is no longer externally visible and they never became virtual functions
- // as intended. TODO.
-
- unsigned getGenLoadSize() const { return 6; }
- unsigned getGenCallSize() const { return 2; }
- unsigned getGenBranchAlwaysSize() const { return 2; }
- unsigned getGenSaveSize() const { return 1; }
- unsigned getGenParamStoreSize() const { return 1; }
- unsigned getGenSpillSharedSize() const { return getGenLoadSize() + getSharedSize(); }
- unsigned getGenRestoreSharedSize() const { return getGenLoadSize() + getSharedSize(); }
- unsigned getGenRestoreSize() const { return 1; }
inline unsigned getGenAddressCopySize(unsigned loadInst) const;
uint64_t getPhase4SpillAddr() { return (uint64_t) sm_phase4SpillRegion; }
@@ -113,35 +107,43 @@
uint64_t findNextStackLoad(uint64_t addr,
uint64_t end,
unsigned fpOffset);
+
+ unsigned findAllocaOffset(uint64_t instAddr, uint64_t startAddr);
std::vector<unsigned>* m_pCurrSnippet;
OutputToInputRegMap m_outputToInputReg; // Maps input register -> output register
- // Branch-always (annul bit high) instruction base (i.e., address not filled in yet)
- static const unsigned BRANCH_ALWAYS_BASE_ANNUL;
-
- // Branch-always (annul bit low) instruction base (i.e., address not filled in yet)
- static const unsigned BRANCH_ALWAYS_BASE;
-
- // NOP instruction for SparcV9
- static const unsigned NOP_INST = 0x01000000;
-
// Size (in number of 64-bit words) required for storing shared registers
static const unsigned SHARED_SIZE = 7;
- // Instruction width (in bytes)
- static const unsigned INST_WIDTH = 4;
-
- // Sparc-specific constant used in SP manipulations
- static const unsigned BIAS;
-
- // Memory region into which to spill shared registers when executing a phase 4 slot
- // (i.e., the slot that invokes the phase4 function, the slot written by phase 3
+ // The memory region into which to spill shared registers when executing a phase 4
+ // slot (i.e., the slot that invokes the phase4 function, the slot written by phase 3
// invocations). NB: One region is sufficient and we do not need stack semantics
// because only one activation of a phase 4 slot ever occurs at a given time (assuming
// single-threaded execution).
- static uint64_t sm_phase4SpillRegion[SHARED_SIZE];
+ static uint64_t sm_phase4SpillRegion[SHARED_SIZE];
+
+ // Branch-always (annul bit high) instruction base (address not filled in yet)
+ static const unsigned BRANCH_ALWAYS_BASE_ANNUL;
+
+ // Branch-always (annul bit low) instruction base (address not filled in yet)
+ static const unsigned BRANCH_ALWAYS_BASE;
+
+ static const unsigned INST_WIDTH = 4; // In bytes
+ static const unsigned NOP_INST = 0x01000000;
+ static const unsigned BIAS = 2047;
+
+ // Fixed sizes of generated SparcV9 assembly snippets
+
+ static const unsigned GEN_LOAD_SIZE = 6;
+ static const unsigned GEN_CALL_SIZE = 2;
+ static const unsigned GEN_BRANCH_ALWAYS_SIZE = 2;
+ static const unsigned GEN_SAVE_SIZE = 1;
+ static const unsigned GEN_PSTORE_SIZE = 1;
+ static const unsigned GEN_RESTORE_SIZE = 1;
+ static const unsigned GEN_SPL_SHARED_SIZE = GEN_LOAD_SIZE + SHARED_SIZE;
+ static const unsigned GEN_RESTR_SHARED_SIZE = GEN_SPL_SHARED_SIZE;
};
unsigned SparcInstManip::getBranchAlways(uint64_t dest, uint64_t pc, bool annul) const
More information about the llvm-commits
mailing list