[llvm-commits] CVS: llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h Phases.cpp SparcInstManip.cpp SparcInstManip.h
Joel Stanley
jstanley at cs.uiuc.edu
Fri May 9 22:53:00 PDT 2003
Changes in directory llvm/lib/Reoptimizer/Inst/lib:
PhaseInfo.h updated: 1.3 -> 1.4
Phases.cpp updated: 1.25 -> 1.26
SparcInstManip.cpp updated: 1.5 -> 1.6
SparcInstManip.h updated: 1.6 -> 1.7
---
Log message:
Phase 5 slots are working for start-region instrumentation sites only.
---
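For context, the contract between a phase 5 slot and the phase5 function
after this change, sketched from the diffs below (this assumes PrimInfo is
a plain struct, as the field list in PhaseInfo.h suggests):

    // PrimInfo gains paramSize: the number of bytes of instrumentation
    // parameter memory that the phase 5 slot must reserve on the stack.
    struct PrimInfo {
        unsigned gbtType;
        unsigned short* loadVar;   // tag
        unsigned gbtStartIdx;
        unsigned paramSize;        // new in this revision
    };

    // The slot passes the PrimInfo pointer as param 1 and a pointer to the
    // parameter region it alloca'd (in the instrumented code's own stack
    // frame) as param 2.
    void phase5(PrimInfo* pi, void* paramMem);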
Diffs of the changes:
Index: llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h
diff -u llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h:1.3 llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h:1.4
--- llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h:1.3 Thu May 8 11:27:25 2003
+++ llvm/lib/Reoptimizer/Inst/lib/PhaseInfo.h Fri May 9 23:01:50 2003
@@ -26,6 +26,7 @@
unsigned gbtType;
unsigned short* loadVar;
unsigned gbtStartIdx;
+ unsigned paramSize;
};
class Phase3Info
Index: llvm/lib/Reoptimizer/Inst/lib/Phases.cpp
diff -u llvm/lib/Reoptimizer/Inst/lib/Phases.cpp:1.25 llvm/lib/Reoptimizer/Inst/lib/Phases.cpp:1.26
--- llvm/lib/Reoptimizer/Inst/lib/Phases.cpp:1.25 Thu May 8 11:27:25 2003
+++ llvm/lib/Reoptimizer/Inst/lib/Phases.cpp Fri May 9 23:01:50 2003
@@ -493,7 +493,10 @@
//////////////// Phase 5 implementation ////////////////
-void phase5(PrimInfo* pi)
+void phase5(PrimInfo* pi, void* paramMem)
{
DEBUG_MSG("phase5 function invoked\n");
+ DEBUG_MSG("pi->paramSize == " << pi->paramSize << endl);
+ DEBUG_MSG("pi->loadVar (tag) == " << pi->loadVar << endl);
+ DEBUG_MSG("phase 5 function exiting\n");
}
Index: llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp
diff -u llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp:1.5 llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp:1.6
--- llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp:1.5 Thu May 8 11:27:25 2003
+++ llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.cpp Fri May 9 23:01:50 2003
@@ -38,19 +38,19 @@
// NB: Slot does *not* save registers with the 'save' instruction, because
// it must perform the alloca within the stack frame of the code that
// invoked it.
-// +-------------------------------------------+
-// | manually-save clobbered registers |
-// | alloc spill area & inst param on stack |
-// | spill shared registers |
-// | copy PrimInfo ptr to param 1 |
-// | copy spill area addr to param 2 |
-// | call phase 5 |
-// | nop |
-// | restore shared registers |
-// | manually-restore clobbered registers |
-// | branch back to orig code |
-// | nop |
-// +-------------------------------------------+
+// +------------------------------------------------------+
+// | alloc spill area/reg save/inst param region on stack |
+// | manually-save clobbered registers |
+// | spill shared registers |
+// | copy PrimInfo ptr to param 1 |
+// | copy spill area addr to param 2 |
+// | call phase 5 |
+// | nop |
+// | restore shared registers |
+// | manually-restore clobbered registers |
+// | branch back to orig code |
+// | nop |
+// +------------------------------------------------------+
// []
#include <iostream>
@@ -77,7 +77,7 @@
void phase3(Phase3Info* p3info);
void phase4(uint64_t tag, Phase4Info* p4info);
-void phase5(PrimInfo* pi);
+void phase5(PrimInfo* pi, void* paramMem);
SparcInstManip::SparcInstManip(TraceCache* tc):
InstManip(tc, SHARED_SIZE, INST_WIDTH, NOP_INST),
@@ -155,10 +155,10 @@
generateSave();
generateAddressCopy(cand.front().second, REG_0, true); // REG_0 live to call
- generateParamStore(REG_0, PARAM_0);
+ generateStackStore(REG_0, PARAM_0);
generateSpillShared(spillAddr, REG_1, REG_2);
generateLoad((uint64_t) p4info, REG_1, REG_2); // REG_1 live to call
- generateParamStore(REG_1, PARAM_1);
+ generateStackStore(REG_1, PARAM_1);
generateCall((uint64_t) &phase4, slotBase);
generateRestoreShared(spillAddr);
generateBranchAlways(cand.front().first, slotBase, getRestoreInst());
@@ -181,21 +181,54 @@
// which is the address range of the enclosing function.
unsigned offset = findAllocaOffset(instAddr, extents);
+ unsigned sharedSize = WORD_WIDTH * getSharedSize();
+ unsigned stkSize = sharedSize + WORD_WIDTH * 2 + pi->paramSize;
+
+ if(stkSize % STACK_ALIGN != 0)
+ cerr << "Warning: phase 5 slot stack size is not a multiple of " << STACK_ALIGN << endl;
+
DEBUG_MSG("buildSlot(p5) obtained offset " << std::dec
<< offset << std::hex << endl);
+ // After the alloca, our stack region looks like:
+ // sp + BIAS + offset + stkSize -> +--------------------------------+
+ // | inst function parameter memory | } pi->paramSize
+ // +--------------------------------+
+ // | save area for clobbered regs | } WORD_WIDTH * 2
+ // +--------------------------------+
+ // | spill region for shared regs | } sharedSize
+ // sp + BIAS + offset -> +--------------------------------+
+
+ // TODO: ensure that stack size is aligned properly
+
startCode(snippet);
- generateSave();
+ generateAlloca(stkSize);
+
+ // "Manually" save REG_0, REG_1
+ generateStackStore(REG_0, offset + sharedSize);
+ generateStackStore(REG_1, offset + sharedSize + WORD_WIDTH);
+
+ generateSpillShared(offset);
+
+ generateLoad((uint64_t) pi, REG_0, REG_1); // REG_0 live to call
+ generateStackStore(REG_0, PARAM_0);
+
+ generateSPLoad(REG_1, offset + stkSize - pi->paramSize); // REG_1 live to call
+ generateStackStore(REG_1, PARAM_1);
+
generateCall((uint64_t) &phase5, slotBase);
+ generateRestoreShared(offset);
- // We need to branch back to one instruction beyond instruction that branches to the
- // phase 5 slot.
- generateBranchAlways(instAddr + getInstWidth(), slotBase, getRestoreInst());
+ // "Manually" restore REG_0, REG_1
+ generateStackLoad(REG_0, offset + sharedSize);
+ generateStackLoad(REG_1, offset + sharedSize + WORD_WIDTH);
+ // We need to branch back to one instruction beyond the branch to the phase 5 slot.
+ generateBranchAlways(instAddr + getInstWidth(), slotBase, getNOP());
endCode();
- // TODO: Add assert against against the snippet.
+ // TODO: Add an assert against the snippet size.
}
unsigned SparcInstManip::getSlotSize(Phase2* p2) const
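To make the layout arithmetic in buildSlot concrete, a worked example with
illustrative values (assuming WORD_WIDTH == 8 and getSharedSize() == 7, one
slot per %g1-%g7, with a hypothetical pi->paramSize of 24):

    unsigned sharedSize = 8 * 7;                    // 56-byte spill region
    unsigned stkSize    = sharedSize + 8 * 2 + 24;  // + reg saves + params = 96

    // Offsets relative to %sp + BIAS after the alloca:
    //   shared-register spill:   offset      .. offset + 56
    //   REG_0/REG_1 save area:   offset + 56 .. offset + 72
    //   inst parameter memory:   offset + stkSize - 24 == offset + 72
    //                                        .. offset + 96
    //
    // 96 is a multiple of STACK_ALIGN (16), so the alignment warning stays
    // quiet; a paramSize of, say, 20 would trip it.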
@@ -207,10 +240,10 @@
(void) p2;
return GEN_SAVE_SIZE +
- GEN_SPL_SHARED_SIZE +
+ GEN_SPL_SIZE +
GEN_LOAD_SIZE +
GEN_CALL_SIZE +
- GEN_RESTR_SHARED_SIZE +
+ GEN_UNSPL_SIZE +
GEN_BRANCH_ALWAYS_SIZE;
}
@@ -224,12 +257,12 @@
return GEN_SAVE_SIZE +
getGenAddressCopySize(cand.front().second) +
- GEN_PSTORE_SIZE +
- GEN_SPL_SHARED_SIZE +
+ GEN_STKSTORE_SIZE +
+ GEN_SPL_SIZE +
GEN_LOAD_SIZE +
- GEN_PSTORE_SIZE +
+ GEN_STKSTORE_SIZE +
GEN_CALL_SIZE +
- GEN_RESTR_SHARED_SIZE +
+ GEN_UNSPL_SIZE +
GEN_BRANCH_ALWAYS_SIZE;
}
@@ -241,7 +274,20 @@
(void) p4;
- return GEN_SAVE_SIZE + GEN_CALL_SIZE + GEN_BRANCH_ALWAYS_SIZE;
+ return GEN_ALLOCA_SIZE +
+ GEN_STKSTORE_SIZE +
+ GEN_STKSTORE_SIZE +
+ GEN_SPL_STK_SIZE +
+ GEN_LOAD_SIZE +
+ GEN_STKSTORE_SIZE +
+ GEN_SPLOAD_SIZE +
+ GEN_STKSTORE_SIZE +
+ GEN_CALL_SIZE +
+ GEN_UNSPL_STK_SIZE +
+ GEN_STKLOAD_SIZE +
+ GEN_STKLOAD_SIZE +
+ GEN_BRANCH_ALWAYS_SIZE;
}
void SparcInstManip::findCandidates(const std::pair<uint64_t, uint64_t>& range,
@@ -292,6 +338,31 @@
fflush(stdout);
}
+void SparcInstManip::generateSPLoad(LogicalRegister reg, unsigned offset)
+{
+ // Computes %sp + offset into reg (an address computation, not a memory load)
+ assert(m_pCurrSnippet && "Invalid snippet for code generation");
+ unsigned initSize = m_pCurrSnippet->size();
+
+ m_pCurrSnippet->push_back(MK_ADD_R_I(m_logicalToActualReg[reg], R_O6, offset));
+
+ assert(m_pCurrSnippet->size() - initSize == GEN_SPLOAD_SIZE &&
+ "Unexpected number of instructions in code sequence for SP load");
+}
+
+void SparcInstManip::generateAlloca(unsigned size)
+{
+ assert(m_pCurrSnippet && "Invalid snippet for code generation");
+ assert(size % STACK_ALIGN == 0 && "Alloca size is not stack-aligned");
+
+ unsigned initSize = m_pCurrSnippet->size();
+
+ m_pCurrSnippet->push_back(MK_ADD_R_I(R_O6, R_O6, -size));
+
+ assert(m_pCurrSnippet->size() - initSize == GEN_ALLOCA_SIZE &&
+ "Unexpected number of instructions in code sequence for SP add");
+}
+
void SparcInstManip::generateLoad(uint64_t value,
LogicalRegister dest,
LogicalRegister tmp)
@@ -366,18 +437,32 @@
"Unexpected number of instructions in code sequence for address copy");
}
-void SparcInstManip::generateParamStore(LogicalRegister src,
- StackOffset off)
+void SparcInstManip::generateStackLoad(LogicalRegister dest,
+ unsigned stkOffset)
{
assert(m_pCurrSnippet && "Invalid snippet for code generation");
vector<unsigned>& snippet = *m_pCurrSnippet;
unsigned initSize = snippet.size();
- unsigned srcReg = m_logicalToActualReg[src];
- snippet.push_back(MK_STX_STACK(srcReg, BIAS + off));
+ snippet.push_back(MK_LDX_STACK(m_logicalToActualReg[dest], BIAS + stkOffset));
- assert(snippet.size() - initSize == GEN_PSTORE_SIZE &&
+ assert(snippet.size() - initSize == GEN_STKLOAD_SIZE &&
+ "Unexpected number of instructions in code sequence for stack load");
+}
+
+
+void SparcInstManip::generateStackStore(LogicalRegister src,
+ unsigned stkOffset)
+{
+ assert(m_pCurrSnippet && "Invalid snippet for code generation");
+ vector<unsigned>& snippet = *m_pCurrSnippet;
+
+ unsigned initSize = snippet.size();
+
+ snippet.push_back(MK_STX_STACK(m_logicalToActualReg[src], BIAS + stkOffset));
+
+ assert(snippet.size() - initSize == GEN_STKSTORE_SIZE &&
"Unexpected number of instructions in code sequence for parameter store");
}
@@ -436,6 +521,34 @@
// TODO: It will be worthwhile to generate calls to functions that spill/restore the
// shared registers instead of dumping all of the code into the current snippet.
+static void generateRestoreShared(vector<unsigned>& snippet,
+ unsigned reg,
+ const unsigned width,
+ unsigned offset = 0)
+{
+ snippet.push_back(MK_LOAD_IMM(R_G1, reg, offset + 0 * width));
+ snippet.push_back(MK_LOAD_IMM(R_G2, reg, offset + 1 * width));
+ snippet.push_back(MK_LOAD_IMM(R_G3, reg, offset + 2 * width));
+ snippet.push_back(MK_LOAD_IMM(R_G4, reg, offset + 3 * width));
+ snippet.push_back(MK_LOAD_IMM(R_G5, reg, offset + 4 * width));
+ snippet.push_back(MK_LOAD_IMM(R_G6, reg, offset + 5 * width));
+ snippet.push_back(MK_LOAD_IMM(R_G7, reg, offset + 6 * width));
+}
+
+void SparcInstManip::generateRestoreShared(unsigned offset)
+{
+ // Un-spill from the stack -- assumes %sp + BIAS + offset points to a valid stack
+ // location.
+
+ assert(m_pCurrSnippet && "Invalid snippet for code generation");
+ unsigned initSize = m_pCurrSnippet->size();
+
+ ::generateRestoreShared(*m_pCurrSnippet, R_O6, WORD_WIDTH, offset + BIAS);
+
+ assert(m_pCurrSnippet->size() - initSize == GEN_UNSPL_STK_SIZE &&
+ "Unexpected number of instructions in code sequence for spill to stack");
+}
+
void SparcInstManip::generateRestoreShared(uint64_t restoreFromAddr,
LogicalRegister tmp1,
LogicalRegister tmp2)
@@ -445,42 +558,57 @@
vector<unsigned>& snippet = *m_pCurrSnippet;
unsigned initSize = snippet.size();
- unsigned tmpReg = m_logicalToActualReg[tmp1];
generateLoad(restoreFromAddr, tmp1, tmp2);
- snippet.push_back(MK_LOAD_IMM(R_G1, tmpReg, 8));
- snippet.push_back(MK_LOAD_IMM(R_G2, tmpReg, 16));
- snippet.push_back(MK_LOAD_IMM(R_G3, tmpReg, 24));
- snippet.push_back(MK_LOAD_IMM(R_G4, tmpReg, 32));
- snippet.push_back(MK_LOAD_IMM(R_G5, tmpReg, 40));
- snippet.push_back(MK_LOAD_IMM(R_G6, tmpReg, 48));
- snippet.push_back(MK_LOAD_IMM(R_G7, tmpReg, 56));
+ ::generateRestoreShared(snippet, m_logicalToActualReg[tmp1], WORD_WIDTH);
- assert(snippet.size() - initSize == GEN_RESTR_SHARED_SIZE &&
+ assert(snippet.size() - initSize == GEN_UNSPL_SIZE &&
"Unexpected number of instructions in code sequence for restore shared");
}
+static void generateSpillShared(vector<unsigned>& snippet,
+ unsigned reg,
+ const unsigned width,
+ unsigned offset = 0)
+{
+ snippet.push_back(MK_STORE_IMM(R_G1, reg, offset + 0 * width));
+ snippet.push_back(MK_STORE_IMM(R_G2, reg, offset + 1 * width));
+ snippet.push_back(MK_STORE_IMM(R_G3, reg, offset + 2 * width));
+ snippet.push_back(MK_STORE_IMM(R_G4, reg, offset + 3 * width));
+ snippet.push_back(MK_STORE_IMM(R_G5, reg, offset + 4 * width));
+ snippet.push_back(MK_STORE_IMM(R_G6, reg, offset + 5 * width));
+ snippet.push_back(MK_STORE_IMM(R_G7, reg, offset + 6 * width));
+}
+
+void SparcInstManip::generateSpillShared(unsigned offset)
+{
+ // Spill to the stack -- assumes %sp + BIAS + offset points to a valid stack
+ // location with enough room for the entire spill region.
+
+ assert(m_pCurrSnippet && "Invalid snippet for code generation");
+ unsigned initSize = m_pCurrSnippet->size();
+
+ ::generateSpillShared(*m_pCurrSnippet, R_O6, WORD_WIDTH, BIAS + offset);
+
+ assert(m_pCurrSnippet->size() - initSize == GEN_SPL_STK_SIZE &&
+ "Unexpected number of instructions in code sequence for spill to stack");
+}
+
void SparcInstManip::generateSpillShared(uint64_t spillToAddr,
LogicalRegister tmp1,
- LogicalRegister tmp2)
+ LogicalRegister tmp2)
{
assert(m_pCurrSnippet && "Invalid snippet for code generation");
assert(tmp1 != tmp2 && "Distinct logical registers required");
vector<unsigned>& snippet = *m_pCurrSnippet;
unsigned initSize = snippet.size();
- unsigned tmpReg = m_logicalToActualReg[tmp1];
generateLoad(spillToAddr, tmp1, tmp2);
- snippet.push_back(MK_STORE_IMM(R_G1, tmpReg, 8));
- snippet.push_back(MK_STORE_IMM(R_G2, tmpReg, 16));
- snippet.push_back(MK_STORE_IMM(R_G3, tmpReg, 24));
- snippet.push_back(MK_STORE_IMM(R_G4, tmpReg, 32));
- snippet.push_back(MK_STORE_IMM(R_G5, tmpReg, 40));
- snippet.push_back(MK_STORE_IMM(R_G6, tmpReg, 48));
- snippet.push_back(MK_STORE_IMM(R_G7, tmpReg, 56));
+ ::generateSpillShared(snippet, m_logicalToActualReg[tmp1], WORD_WIDTH);
- assert(snippet.size() - initSize == GEN_SPL_SHARED_SIZE &&
+ assert(snippet.size() - initSize == GEN_SPL_SIZE &&
"Unexpected number of instructions in code sequence for spill shared");
}
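A design note on the spill/restore refactoring above: both spill variants
(and, symmetrically, both restore variants) now share one file-static
emitter parameterized by base register and immediate offset. Note also that
the shared-register slots now start at offset 0 rather than 8; this is safe
because spill and restore agree on the layout. The two call shapes from the
new code, side by side:

    // Address-based: materialize the 64-bit buffer address into tmp1,
    // then store %g1-%g7 at [tmp1 + 0*8] .. [tmp1 + 6*8].
    generateLoad(spillToAddr, tmp1, tmp2);
    ::generateSpillShared(snippet, m_logicalToActualReg[tmp1], WORD_WIDTH);

    // Stack-relative: no temporaries needed; store %g1-%g7 directly at
    // [%sp + BIAS + offset + 0*8] .. [%sp + BIAS + offset + 6*8].
    ::generateSpillShared(*m_pCurrSnippet, R_O6, WORD_WIDTH, BIAS + offset);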
Index: llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h
diff -u llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h:1.6 llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h:1.7
--- llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h:1.6 Thu May 8 11:27:25 2003
+++ llvm/lib/Reoptimizer/Inst/lib/SparcInstManip.h Fri May 9 23:01:50 2003
@@ -19,12 +19,6 @@
public:
SparcInstManip(TraceCache* tc);
- // Offsets in stack frame for function parameters
- enum StackOffset {
- PARAM_0 = 128,
- PARAM_1 = 136
- };
-
virtual void buildSlot(Phase3Info* p3info,
std::vector<unsigned>& snippet);
@@ -67,22 +61,41 @@
void generateCall(uint64_t dest, uint64_t slotBase);
+ void generateSPLoad(LogicalRegister reg, unsigned offset);
+
+ // generateAlloca - Generate code to allocate 'size' bytes on the stack
+ void generateAlloca(unsigned size);
+
void generateLoad(uint64_t value,
LogicalRegister dest,
LogicalRegister tmp);
- void generateParamStore(LogicalRegister src, StackOffset off);
+ void generateStackStore(LogicalRegister src, unsigned stkOffset);
+ void generateStackLoad(LogicalRegister dest, unsigned stkOffset);
void generateRestore();
void generateSave();
+ // generateRestoreShared - Generate code to un-spill the shared registers from
+ // restoreFromAddr, using tmp1 and tmp2 as temporary registers
+
void generateRestoreShared(uint64_t restoreFromAddr,
LogicalRegister tmp1 = REG_0,
LogicalRegister tmp2 = REG_1);
+
+ // generateRestoreShared - Generate code to un-spill the shared registers from the memory
+ // at %sp + BIAS + offset
+ void generateRestoreShared(unsigned offset);
+ // generateSpillShared - Generate code to spill the shared registers to spillFromAddr,
+ // using tmp1 and tmp2 as temporary registers
void generateSpillShared(uint64_t spillFromAddr,
LogicalRegister tmp1 = REG_0,
LogicalRegister tmp2 = REG_1);
+
+ // generateSpillShared - Generate code to spill the shared registers to the memory at
+ // %sp + BIAS + offset
+ void generateSpillShared(unsigned offset);
unsigned getRestoreInst() const;
inline unsigned getCallInst(uint64_t dest, uint64_t pc) const;
@@ -111,7 +124,7 @@
const std::pair<uint64_t, uint64_t>& range);
static bool isAllocaSignature(unsigned inst, unsigned& offset);
-
+
std::vector<unsigned>* m_pCurrSnippet;
OutputToInputRegMap m_outputToInputReg; // Maps input register -> output register
@@ -137,17 +150,24 @@
static const unsigned BIAS = 2047;
static const unsigned STACK_ALIGN = 16;
static const unsigned SEARCH_DELTA = 20;
+ static const unsigned WORD_WIDTH = 8;
+ static const unsigned PARAM_0 = 128;
+ static const unsigned PARAM_1 = PARAM_0 + 8;
// Fixed sizes of generated SparcV9 assembly snippets
-
static const unsigned GEN_LOAD_SIZE = 6;
static const unsigned GEN_CALL_SIZE = 2;
static const unsigned GEN_BRANCH_ALWAYS_SIZE = 2;
static const unsigned GEN_SAVE_SIZE = 1;
- static const unsigned GEN_PSTORE_SIZE = 1;
+ static const unsigned GEN_STKSTORE_SIZE = 1;
+ static const unsigned GEN_STKLOAD_SIZE = 1;
static const unsigned GEN_RESTORE_SIZE = 1;
- static const unsigned GEN_SPL_SHARED_SIZE = GEN_LOAD_SIZE + SHARED_SIZE;
- static const unsigned GEN_RESTR_SHARED_SIZE = GEN_SPL_SHARED_SIZE;
+ static const unsigned GEN_SPL_SIZE = GEN_LOAD_SIZE + SHARED_SIZE;
+ static const unsigned GEN_SPL_STK_SIZE = SHARED_SIZE;
+ static const unsigned GEN_UNSPL_SIZE = GEN_SPL_SIZE;
+ static const unsigned GEN_UNSPL_STK_SIZE = GEN_SPL_STK_SIZE;
+ static const unsigned GEN_ALLOCA_SIZE = 1;
+ static const unsigned GEN_SPLOAD_SIZE = 1;
};
unsigned SparcInstManip::getBranchAlways(uint64_t dest, uint64_t pc, bool annul) const
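Background on the relocated PARAM_0/PARAM_1 constants (this is standard
SparcV9 ABI layout, stated here as an assumption rather than taken from the
commit): %sp + BIAS addresses the true frame base on V9, and the first 16
extended words of every frame form the register-window save area for
%l0-%l7 and %i0-%i7, so the first outgoing stack parameter slot falls at:

    const unsigned PARAM_0 = 16 * 8;       // == 128, first parameter slot
    const unsigned PARAM_1 = PARAM_0 + 8;  // == 136, next extended word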