[llvm-commits] CVS: llvm/lib/Reoptimizer/Inst/SparcInstManip.cpp SparcInstManip.h InstManip.cpp InstManip.h Phases.cpp

Joel Stanley jstanley at cs.uiuc.edu
Tue Apr 29 22:02:01 PDT 2003


Changes in directory llvm/lib/Reoptimizer/Inst:

SparcInstManip.cpp added (r1.1)
SparcInstManip.h added (r1.1)
InstManip.cpp updated: 1.9 -> 1.10
InstManip.h updated: 1.10 -> 1.11
Phases.cpp updated: 1.15 -> 1.16

---
Log message:

Intermediate refactoring point: made SparcInstManip a subclass of InstManip.
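
As a rough sketch of the intended usage after this split (a hypothetical
call site; the real ones are in Phases.cpp), clients construct the
platform-specific subclass but program against the InstManip interface:

    #include "SparcInstManip.h"

    void disassembleRange(VirtualMem* vm, uint64_t start, uint64_t end)
    {
        InstManip* im = new SparcInstManip(vm); // SparcV9-specific instance
        im->printRange(start, end);             // dispatches to the Sparc impl
        delete im;                              // ~InstManip() is virtual
    }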



---
Diffs of the changes:

Index: llvm/lib/Reoptimizer/Inst/SparcInstManip.cpp
diff -c /dev/null llvm/lib/Reoptimizer/Inst/SparcInstManip.cpp:1.1
*** /dev/null	Tue Apr 29 22:08:13 2003
--- llvm/lib/Reoptimizer/Inst/SparcInstManip.cpp	Tue Apr 29 22:08:03 2003
***************
*** 0 ****
--- 1,522 ----
+ ////////////////
+ // programmer: Joel Stanley
+ //       date: Tue Apr 29 21:21:50 CDT 2003
+ //     fileid: SparcInstManip.cpp
+ //    purpose: Implements the SparcInstManip class as described in SparcInstManip.h
+ 
+ #include <iostream>
+ #include <iomanip>
+ 
+ #include "llvm/Reoptimizer/VirtualMem.h"
+ #include "llvm/Reoptimizer/BinInterface/sparc9.h"
+ #include "llvm/Reoptimizer/BinInterface/bitmath.h"
+ 
+ #include "SparcInstManip.h"
+ 
+ const unsigned SparcInstManip::NOP_INST = 0x01000000;
+ const unsigned SparcInstManip::BRANCH_ALWAYS_BASE = 0x10480000;
+ const unsigned SparcInstManip::BRANCH_ALWAYS_BASE_ANNUL = 0x30480000;
+ const unsigned SparcInstManip::BIAS = 2047;
+ uint64_t SparcInstManip::sm_phase3SpillRegion[SparcInstManip::SHARED_SIZE];
+ 
+ using std::cout;
+ using std::cerr;
+ using std::endl;
+ 
+ SparcInstManip::SparcInstManip(VirtualMem* vm):
+     InstManip(vm),
+     m_pCurrSnippet(0)
+ {
+     assert(vm && "SparcInstManip requires valid VirtualMem instance");
+ 
+     // Populate logical->actual register map. Since this SparcInstManip class is
+     // SparcV9-specific, we map to the values used by the BinInterface library and macros.
+ 
+     m_logicalToActualReg[REG_0] = R_O0;
+     m_logicalToActualReg[REG_1] = R_O1;
+     m_logicalToActualReg[REG_2] = R_O2;
+ 
+     // Populate output->input register map. This is SparcV9 specific and corresponds to
+     // the register mapping that occurs after a 'save' instruction is issued. Shared and
+     // local registers map to themselves.
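+     // (Example: a use of %o0 in code emitted after the 'save' must be rewritten
+     // as %i0, since the register window has shifted.)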
+ 
+     m_outputToInputReg[R_O0] = R_I0;
+     m_outputToInputReg[R_O1] = R_I1;
+     m_outputToInputReg[R_O2] = R_I2;
+     m_outputToInputReg[R_O3] = R_I3;
+     m_outputToInputReg[R_O4] = R_I4;
+     m_outputToInputReg[R_O5] = R_I5;
+     m_outputToInputReg[R_O6] = R_I6;
+     m_outputToInputReg[R_O7] = R_I7;
+ 
+     for(unsigned i = R_G0; i <= R_G7; ++i)
+         m_outputToInputReg[i] = i;
+     for(unsigned i = R_L0; i <= R_L7; ++i)
+         m_outputToInputReg[i] = i;
+ }
+ 
+ void SparcInstManip::printRange(unsigned* start,
+                                 unsigned* end) const
+ {
+     // Dumps the contents (and corresponding disassembly) of the memory range
+     // [start, end] to stdout.  TODO: Parameterize by an ostream instance; cannot
+     // do this yet because BinInterface is hard-coded to use printf and must be changed.
+         
+     cout << "Sparc disassembly of range ["
+               << start << ", " << end << "]:" << endl;
+ 
+     for(; start <= end; ++start) {
+         cout << start << " | " 
+                   << std::hex << std::setw(8) << std::setfill('0')
+                   << *start << " | ";
+         sparc_print(*start);
+         cout << endl;
+     }
+ }
+ 
+ void SparcInstManip::printInst(unsigned inst) const
+ {
+     sparc_print(inst);
+     fflush(stdout);
+ }
+ 
+ uint64_t SparcInstManip::skipFunctionHdr(uint64_t addr) const
+ {
+     // For SparcV9, what we're calling the "function header" is the save instruction (if
+     // present) that occurs as the first instruction of the function.
+     
+     unsigned inst = m_pVM->readInstrFrmVm(addr);
+     assert(RD_FLD(inst, INSTR_OP) == OP_2 &&
+            RD_FLD(inst, INSTR_OP3) == OP3_SAVE &&
+            "Unhandled case: non-save instruction in function header");
+     
+     return addr + getInstWidth();
+ }
+ 
+ void SparcInstManip::generateLoad(uint64_t value,
+                                  LogicalRegister dest,
+                                  LogicalRegister tmp)
+ {
+     // When dest == REG_0, load the 64-bit value into %o0, using %o0 and %o1.
+     // When dest == REG_1, load the 64-bit value into %o1, using %o1 and %o2.
+     // The sequence of instructions is placed into the provided instruction vector.
+ 
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     assert(dest != tmp && "Distinct logical registers required");
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+     
+     unsigned initSize = snippet.size();
+     unsigned destReg = m_logicalToActualReg[dest];
+     unsigned tmpReg = m_logicalToActualReg[tmp];
+     
+     // sethi (upper 22b of upper wrd), %destReg
+     snippet.push_back(MK_SETHI(destReg, HIGH22(HIGHWORD(value))));
+ 
+     // or %destReg, (lower 10b of upper wrd), %destReg
+     snippet.push_back(MK_LOGIC_IMM(OP3_OR, destReg, destReg, LOW10(HIGHWORD(value))));
+ 
+     // sllx %destReg, 32, %destReg
+     snippet.push_back(MK_SHIFTX(OP3_SLL, destReg, destReg, 32));
+ 
+     // sethi (upper 22b of lwr wrd), %tmpReg
+     snippet.push_back(MK_SETHI(tmpReg, HIGH22(LOWWORD(value))));
+ 
+     // or %destReg, %tmpReg, %destReg
+     snippet.push_back(MK_LOGIC(OP3_OR, destReg, destReg, tmpReg));
+ 
+     // add %destReg, (lwr 10b of lwr wrd), %destReg
+     snippet.push_back(MK_ADD_R_I(destReg, destReg, LOW10(LOWWORD(value))));
+ 
+     assert(snippet.size() - initSize == getGenLoadSize() &&
+            "Unexpected number of instructions in code sequence for 64-bit value -> %dest");
+ }
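+ 
+ // For example, generateLoad(0x0000000100000FFFULL, REG_0, REG_1) emits the
+ // following sequence (with %o0 as dest and %o1 as tmp):
+ //
+ //   sethi %hi(0x00000001), %o0   ! %o0 = 0      (upper 22 bits of high word)
+ //   or    %o0, 0x001, %o0        ! %o0 = 0x1    (lower 10 bits of high word)
+ //   sllx  %o0, 32, %o0           ! %o0 = 0x100000000
+ //   sethi %hi(0x00000FFF), %o1   ! %o1 = 0xC00  (upper 22 bits of low word)
+ //   or    %o0, %o1, %o0          ! %o0 = 0x100000C00
+ //   add   %o0, 0x3FF, %o0        ! %o0 = 0x0000000100000FFF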
+ 
+ void SparcInstManip::generateAddressCopy(unsigned loadInst,
+                                         LogicalRegister dest,
+                                         bool afterSave)
+ {
+     // NB: After a save instruction has been issued, the output registers are mapped to
+     // the input registers.
+ 
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+ 
+     unsigned initSize = snippet.size();
+     unsigned destReg = m_logicalToActualReg[dest];
+     unsigned rs1 = RD_FLD(loadInst, INSTR_RS1);
+ 
+     if(afterSave)
+         rs1 = m_outputToInputReg[rs1];
+ 
+     if(RD_FLD(loadInst, INSTR_I)) {
+         // Case 1: load is immediate-valued --> reg, imm value add instruction needed
+         unsigned imm = RD_FLD(loadInst, INSTR_SIMM13);
+         snippet.push_back(MK_ADD_R_I(destReg, rs1, imm));
+     }
+     else {
+         // Case 2: load is register-valued --> reg, reg add instruction needed
+         unsigned rs2 = RD_FLD(loadInst, INSTR_RS2);
+ 
+         if(afterSave)
+             rs2 = m_outputToInputReg[rs2];
+         
+         snippet.push_back(MK_ADD_R_R(destReg, rs1, rs2));
+     }
+ 
+     assert(snippet.size() - initSize == getGenAddressCopySize(loadInst) &&
+            "Unexpected number of instructions in code sequence for address copy");
+ }
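+ 
+ // For example, if loadInst is "lduh [%o2 + 12], %l3" and afterSave is true,
+ // the call generates the single instruction "add %i2, 12, %dest": %o2 is
+ // renamed to %i2 by the post-save window mapping, and the add recomputes the
+ // load's effective address into the requested dest register.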
+ 
+ void SparcInstManip::generateParamStore(LogicalRegister src,
+                                        StackOffset off)
+ {
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+ 
+     unsigned initSize = snippet.size();
+     unsigned srcReg = m_logicalToActualReg[src];
+ 
+     snippet.push_back(MK_STX_STACK(srcReg, BIAS + off));
+ 
+     assert(snippet.size() - initSize == getGenParamStoreSize() &&
+            "Unexpected number of instructions in code sequence for parameter store");
+ }
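+ 
+ // For example, generateParamStore(REG_0, PARAM_0) emits
+ // "stx %o0, [%sp + 2047 + 128]": BIAS (2047) is the SparcV9 stack bias, and
+ // PARAM_0 (128) is the first parameter slot past the register-window save area.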
+ 
+ void SparcInstManip::generateCall(uint64_t dest,
+                                  uint64_t slotBase)
+ {
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+ 
+     unsigned initSize = snippet.size();
+     
+     // Calculate address of call instruction from slotBase
+     uint64_t callInstAddr = slotBase + getInstWidth() * snippet.size();
+ 
+     // Add call instruction and nop (for call delay slot) to code snippet.
+     snippet.push_back(getCallInst(dest, callInstAddr));
+     snippet.push_back(NOP_INST);
+ 
+     assert(snippet.size() - initSize == getGenCallSize() &&
+            "Unexpected number of instructions in code sequence for call");
+ }
+ 
+ unsigned SparcInstManip::getRestoreInst() const
+ {
+     // restore %g0, 0, %g0
+     return MK_RESTORE_IMM(R_G0, R_G0, 0);
+ }
+ 
+ void SparcInstManip::generateRestore()
+ {
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+ 
+     unsigned initSize = snippet.size();
+ 
+     snippet.push_back(getRestoreInst());
+ 
+     assert(snippet.size() - initSize == getGenRestoreSize() &&
+            "Unexpected number of instructions in code sequence for restore");
+ }
+ 
+ void SparcInstManip::generateSave()
+ {
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+ 
+     unsigned initSize = snippet.size();    
+ 
+     // save %sp, -176, %sp
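+     // (176 bytes is the minimal SparcV9 frame: 16 x 8 bytes of register-window
+     // save area plus 6 x 8 bytes of outgoing-argument space.)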
+     snippet.push_back(MK_SAVE_IMM(R_O6, R_O6, -176));
+ 
+     assert(snippet.size() - initSize == getGenSaveSize() &&
+            "Unexpected number of instructions in code sequence for save");
+ }
+ 
+ // TODO: It will be worthwhile to generate calls to functions that spill/restore the
+ // shared registers instead of dumping all of the code into the current snippet.
+ 
+ void SparcInstManip::generateRestoreShared(uint64_t restoreFromAddr,
+                                           LogicalRegister tmp1,
+                                           LogicalRegister tmp2)
+ {
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     assert(tmp1 != tmp2 && "Distinct logical registers required");
+ 
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+     unsigned initSize = snippet.size();
+     unsigned tmpReg = m_logicalToActualReg[tmp1];
+ 
+     generateLoad(restoreFromAddr, tmp1, tmp2);
+     snippet.push_back(MK_LOAD_IMM(R_G1, tmpReg, 8));
+     snippet.push_back(MK_LOAD_IMM(R_G2, tmpReg, 16));
+     snippet.push_back(MK_LOAD_IMM(R_G3, tmpReg, 24));
+     snippet.push_back(MK_LOAD_IMM(R_G4, tmpReg, 32));
+     snippet.push_back(MK_LOAD_IMM(R_G5, tmpReg, 40));
+     snippet.push_back(MK_LOAD_IMM(R_G6, tmpReg, 48));
+     snippet.push_back(MK_LOAD_IMM(R_G7, tmpReg, 56));
+ 
+     assert(snippet.size() - initSize == getGenRestoreSharedSize() &&
+            "Unexpected number of instructions in code sequence for restore shared");
+ }
+ 
+ void SparcInstManip::generateSpillShared(uint64_t spillToAddr,
+                                         LogicalRegister tmp1,
+                                         LogicalRegister tmp2)
+ {
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     assert(tmp1 != tmp2 && "Distinct logical registers required");
+ 
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+     unsigned initSize = snippet.size();    
+     unsigned tmpReg = m_logicalToActualReg[tmp1];
+ 
+     generateLoad(spillToAddr, tmp1, tmp2);
+     snippet.push_back(MK_STORE_IMM(R_G1, tmpReg, 8));
+     snippet.push_back(MK_STORE_IMM(R_G2, tmpReg, 16));
+     snippet.push_back(MK_STORE_IMM(R_G3, tmpReg, 24));
+     snippet.push_back(MK_STORE_IMM(R_G4, tmpReg, 32));
+     snippet.push_back(MK_STORE_IMM(R_G5, tmpReg, 40));
+     snippet.push_back(MK_STORE_IMM(R_G6, tmpReg, 48));
+     snippet.push_back(MK_STORE_IMM(R_G7, tmpReg, 56));
+ 
+     assert(snippet.size() - initSize == getGenSpillSharedSize() &&
+            "Unexpected number of instructions in code sequence for spill shared");
+ }
+ 
+ void SparcInstManip::generateBranchAlways(uint64_t dest,
+                                          uint64_t slotBase,
+                                          unsigned delaySlotInstr)
+ {
+     assert(m_pCurrSnippet && "Invalid snippet for code generation");
+     std::vector<unsigned>& snippet = *m_pCurrSnippet;
+ 
+     unsigned initSize = snippet.size();
+     
+     // Calculate address of branch instruction from slotBase
+     uint64_t branchInstAddr = slotBase + getInstWidth() * snippet.size();
+ 
+     // Add branch instruction and the specified delay slot instruction to code snippet.
+     snippet.push_back(getBranchAlways(dest, branchInstAddr, false)); // annul bit low
+     snippet.push_back(delaySlotInstr);
+ 
+     assert(snippet.size() - initSize == getGenBranchAlwaysSize() &&
+            "Unexpected number of instructions in code sequence for branch-always");
+ }
+ 
+ void SparcInstManip::findCandidates(uint64_t start,
+                                    uint64_t end,
+                                    std::vector<InstCandidate>& candidates)
+ {
+     for(uint64_t currAddr = start; currAddr <= end; currAddr += getInstWidth()) {
+         InstCandidate cand(this);
+         if(isCandidateLoad(currAddr, end, cand))
+             candidates.push_back(cand);
+     }
+ }
+ 
+ static inline bool isLoadHalfWord(unsigned inst)
+ {
+     // Returns true if inst is an LDUH instruction
+     return RD_FLD(inst, INSTR_OP) == OP_3 &&
+         RD_FLD(inst, INSTR_OP3) == OP3_LDUH;
+ }
+ 
+ static inline bool isLoadByte(unsigned inst) 
+ {
+     // Returns true if inst is an LDUB instruction
+     return RD_FLD(inst, INSTR_OP) == OP_3 &&
+         RD_FLD(inst, INSTR_OP3) == OP3_LDUB;
+ }
+ 
+ static inline bool isFPRelative(unsigned inst) 
+ {
+     return RD_FLD(inst, INSTR_RS1) == R_BP && RD_FLD(inst, INSTR_I) == 1;
+ }
+ 
+ static inline bool isSTH(unsigned inst) 
+ {
+     return RD_FLD(inst, INSTR_OP) == OP_3 &&
+         RD_FLD(inst, INSTR_OP3) == OP3_STH;
+ }
+ 
+ static inline bool isSTB(unsigned inst) 
+ {
+     return RD_FLD(inst, INSTR_OP) == OP_3 &&
+         RD_FLD(inst, INSTR_OP3) == OP3_STB;
+ }
+ 
+ static inline unsigned getLoadDest(unsigned inst) 
+ {
+     // Assumes that inst is a load instruction, and returns the register ID of its
+     // destination operand.
+ 
+     return RD_FLD(inst, INSTR_RD);
+ }
+ 
+ static inline unsigned getStoreSrc(unsigned inst)
+ {
+     // Assumes that inst is an stb/sth instruction, and returns the register ID of its
+     // source operand (by source, we don't mean rs1 or rs2, but rather rd, which
+     // specifies the register containing the value being stored).
+ 
+     return RD_FLD(inst, INSTR_RD);
+ }
+ 
+ static inline unsigned getFPOffset(unsigned inst) 
+ {
+     assert(isFPRelative(inst) && "Expect instruction to be FP-relative");
+     return RD_FLD(inst, INSTR_SIMM13);
+ }
+ 
+ bool SparcInstManip::determineSchema(InstCandidate& cand,
+                                     uint64_t end,
+                                     std::pair<uint64_t, unsigned>& load,
+                                     std::pair<uint64_t, unsigned>& store)
+ {
+     // 'load' contains the load instruction (the actual candidate); 'store' contains
+     // the corresponding store instruction, which is either an STB or an STH.  If STB,
+     // take the actions for schema 1; if STH, those for schema 2.
+     
+     if(isSTB(store.second)) {
+         // Schema 1: "direct" pattern
+         cand.setType(InstCandidate::DIRECT);
+         cand.push_back(load);
+         cand.push_back(store);
+         return true;
+     }
+     else {
+         assert(isSTH(store.second) && "Instruction must be STH");
+ 
+         // We have potentially discovered an instance of schema 2, but must search
+         // more to determine if this is the case.
+         // 
+         // KIS heuristic concession: the STH given by 'store' *must* be storing to the
+         // stack in an FP-relative manner; if not, we deny the originating load's candidacy.
+                 
+         if(isFPRelative(store.second)) {
+             // Search forward until an LDUB from the same stack location (+1) as the STH
+             // wrote to is encountered.  The +1 in the FP offset we're searching for is
+             // due to the fact that we stored a half-word but are loading a byte.
+ 
+             if(uint64_t stkLoadAddr = findNextStackLoad(store.first, end, getFPOffset(store.second) + 1)) {
+                 // Last schema-2 search: find the STB instruction that stores from the
+                 // LDUB's destination register.
+                         
+                 unsigned ldubInst = m_pVM->readInstrFrmVm(stkLoadAddr);
+                 uint64_t stbAddr = findNextStore(stkLoadAddr, end, getLoadDest(ldubInst));
+                 unsigned stbInst;
+ 
+                 if(stbAddr && isSTB((stbInst = m_pVM->readInstrFrmVm(stbAddr)))) {
+                             
+                     // All of the criteria have been met for Schema 2, the "stack transfer"
+                     // pattern.
+                         
+                     cand.setType(InstCandidate::STACK_XFER);
+                     cand.push_back(load);
+                     cand.push_back(store);
+                     cand.push_back(stkLoadAddr, ldubInst);
+                     cand.push_back(stbAddr, stbInst);
+                     return true;
+                 }
+             }
+         }
+     }
+ 
+     return false;
+ }
+ 
+ bool SparcInstManip::isCandidateLoad(uint64_t addr,
+                                     uint64_t end,
+                                     InstCandidate& cand)
+ {
+     // {{{ Description of heuristic
+ 
+     // A candidate load is the first instruction in a sequence (with an arbitrary number
+     // of instructions in between elements of this sequence) that is a "signature" for the
+     // particular load of a volatile variable which needs to be replaced with a call to an
+     // instrumentation function.
+     //
+     // Detecting this candidacy condition is accomplished via the application of a
+     // relatively simple heuristic.  The signature sequence always begins with a "load
+     // half-word" and ends with a "store byte".  However, we cannot guarantee that the
+     // sequence looks like:
+     //
+     // lduh [mem1], %r[d]              |
+     // ...                             |  "Schema 1"
+     // stb %r[d], [mem2]               | 
+     //
+     // although this is a perfectly valid pattern to look for.  However, unoptimized code
+     // will frequently transfer this data using the stack, as in this instruction sequence:
+     //
+     // lduh [mem1], %r[d]              |  
+     // ...                             |
+     // sth  %r[d], [stack loc]         |
+     // ...                             |  "Schema 2"
+     // ldub [stack loc], %r[d']        |
+     // ...                             |
+     // stb %r[d'], [mem2]              |
+     //
+     // The current heuristic catches both of these patterns (designated "direct" and "stack
+     // transfer" respectively), and will be extended as insufficiencies in the heuristic
+     // are revealed.
+ 
+     // }}}
+     
+     // Address of potential candidate load is given by 'addr', maximum search address is
+     // given by 'end'
+     
+     unsigned inst = m_pVM->readInstrFrmVm(addr);
+     
+     if(isLoadHalfWord(inst)) {
+         // Search forward until an sth/stb from inst's target register is encountered
+         if(uint64_t storeAddr = findNextStore(addr, end, getLoadDest(inst))) {
+ 
+             // If STB, take actions for schema 1, otherwise check for schema 2 conditions.
+ 
+             unsigned storeInst = m_pVM->readInstrFrmVm(storeAddr);
+             std::pair<uint64_t, unsigned> inst1(addr, inst);
+             std::pair<uint64_t, unsigned> inst2(storeAddr, storeInst);
+ 
+             return determineSchema(cand, end, inst1, inst2);
+         }
+     }
+     
+     return false;
+ }
+ 
+ uint64_t SparcInstManip::findNextStackLoad(uint64_t addr,
+                                           uint64_t end,
+                                           unsigned fpOffset)
+ {
+     // Sweep the range of addresses starting at addr, up to end, looking for a load byte
+     // that is loading from [%fp + fpOffset]. Return the first such instance, or 0 if such
+     // an instance cannot be found.
+ 
+     for(uint64_t currAddr = addr; currAddr <= end; currAddr += getInstWidth()) {
+         unsigned inst = m_pVM->readInstrFrmVm(currAddr);
+ 
+         if(isLoadByte(inst) && isFPRelative(inst) && getFPOffset(inst) == fpOffset)
+             return currAddr;
+     }
+     
+     return 0;
+ }
+ 
+ uint64_t SparcInstManip::findNextStore(uint64_t addr,
+                                       uint64_t end,
+                                       unsigned srcReg)
+ {
+     // Sweep the range of addresses starting at addr, up to end, looking for stb or sth
+     // instructions that are storing _from_ 'srcReg'.  Return the first such instance, or
+     // 0 if such an instance cannot be found.
+ 
+     for(uint64_t currAddr = addr; currAddr <= end; currAddr += getInstWidth()) {
+         unsigned inst = m_pVM->readInstrFrmVm(currAddr);
+         if((isSTH(inst) || isSTB(inst)) && getStoreSrc(inst) == srcReg)
+             return currAddr;
+     }
+ 
+     return 0;
+ }
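+ 
+ // Illustrative driver (hypothetical; the real call sites are in Phases.cpp):
+ //
+ //   SparcInstManip im(vm);
+ //   std::vector<InstCandidate> cands;
+ //   im.findCandidates(im.skipFunctionHdr(funcStart), funcEnd, cands);
+ //   for(unsigned i = 0; i < cands.size(); ++i)
+ //       cands[i].print(std::cout);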


Index: llvm/lib/Reoptimizer/Inst/SparcInstManip.h
diff -c /dev/null llvm/lib/Reoptimizer/Inst/SparcInstManip.h:1.1
*** /dev/null	Tue Apr 29 22:08:13 2003
--- llvm/lib/Reoptimizer/Inst/SparcInstManip.h	Tue Apr 29 22:08:03 2003
***************
*** 0 ****
--- 1,179 ----
+ ////////////////
+ // programmer: Joel Stanley
+ //       date: Tue Apr 29 21:17:33 CDT 2003
+ //     fileid: SparcInstManip.h
+ //    purpose: Provides the SparcV9-specific InstManip subclass. In particular,
+ //    SparcInstManip wraps the BinInterface/TraceCache macros and utilities.
+ //
+ 
+ #ifndef _INCLUDED_SPARCINSTMANIP_H
+ #define _INCLUDED_SPARCINSTMANIP_H
+ 
+ #include "llvm/Reoptimizer/BinInterface/sparcdis.h"
+ #include "llvm/Reoptimizer/InstrUtils.h" // getCallInstr, getUndepJumpInstr, etc.
+ 
+ #include "InstManip.h"
+ 
+ class SparcInstManip : public InstManip
+ {
+   public:
+     SparcInstManip(VirtualMem* vm);
+ 
+     // Offsets in stack frame for function parameters
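+     // (Offsets are relative to %sp + BIAS: the first 128 bytes of the frame hold
+     // the register-window save area, so the first two parameter slots fall at
+     // 128 and 136.)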
+     enum StackOffset {
+         PARAM_0 = 128,
+         PARAM_1 = 136
+     };
+ 
+     virtual void     printRange(unsigned* start, unsigned* end) const;
+     virtual void     printInst(unsigned inst) const;
+ 
+     ////
+ 
+     uint64_t         skipFunctionHdr(uint64_t addr) const;
+                      
+     void             startCode(std::vector<unsigned>& snippet) { m_pCurrSnippet = &snippet; }
+     void             endCode()                                 { m_pCurrSnippet = 0;        }
+                      
+     void             generateAddressCopy(unsigned loadInst,
+                                          LogicalRegister dest,
+                                          bool afterSave);
+                      
+     void             generateBranchAlways(uint64_t dest,
+                                           uint64_t slotBase,
+                                           unsigned delaySlotInstr = NOP_INST);
+                      
+     void             generateCall(uint64_t dest, uint64_t slotBase);
+                      
+     void             generateLoad(uint64_t value,
+                                   LogicalRegister dest,
+                                   LogicalRegister tmp);
+                      
+     void             generateParamStore(LogicalRegister src, StackOffset off);
+                      
+     void             generateRestore();
+     void             generateSave();
+                      
+     void             generateRestoreShared(uint64_t restoreFromAddr,
+                                            LogicalRegister tmp1 = REG_0,
+                                            LogicalRegister tmp2 = REG_1);
+                      
+     void             generateSpillShared(uint64_t spillToAddr,
+                                          LogicalRegister tmp1 = REG_0,
+                                          LogicalRegister tmp2 = REG_1);
+                      
+     void             findCandidates(uint64_t start,
+                                     uint64_t end,
+                                     std::vector<InstCandidate>& candidates);
+                      
+     unsigned         getRestoreInst() const;
+     inline unsigned  getBranchAlways(uint64_t dest, uint64_t pc, bool annulHigh = true) const;
+     inline unsigned  getCallInst(uint64_t dest, uint64_t pc) const;
+     inline bool      isBranch(unsigned inst) const;
+ 
+     // These are functions so that they can become virtual if SparcInstManip is itself
+     // subclassed.  In the short term we could use class constants, but this is clearer.
+     
+     unsigned         getNOP() const                   { return NOP_INST;                       }
+     unsigned         getGenLoadSize() const           { return 6;                              }
+     unsigned         getGenCallSize() const           { return 2;                              }
+     unsigned         getGenBranchAlwaysSize() const   { return 2;                              }
+     unsigned         getGenSaveSize() const           { return 1;                              }
+     unsigned         getGenParamStoreSize() const     { return 1;                              }
+     unsigned         getGenSpillSharedSize() const    { return getGenLoadSize() + SHARED_SIZE; }
+     unsigned         getGenRestoreSharedSize() const  { return getGenLoadSize() + SHARED_SIZE; }
+     unsigned         getGenRestoreSize() const        { return 1;                              }
+     virtual unsigned getInstWidth() const             { return 4;                              }
+     unsigned         getSharedSize() const            { return SHARED_SIZE;                    }
+ 
+     inline unsigned  getGenAddressCopySize(unsigned loadInst) const;
+ 
+     uint64_t getPhase3SpillAddr() { return (uint64_t) sm_phase3SpillRegion; }
+ 
+   private:
+     SparcInstManip() {}
+     typedef std::map<unsigned, unsigned>        OutputToInputRegMap;
+ 
+     bool            isCandidateLoad(uint64_t addr,
+                                     uint64_t end,
+                                     InstCandidate& cand);
+ 
+     bool            determineSchema(InstCandidate& cand,
+                                     uint64_t end,
+                                     std::pair<uint64_t, unsigned>& load,
+                                     std::pair<uint64_t, unsigned>& store);
+ 
+     uint64_t        findNextStore(uint64_t addr,
+                                   uint64_t end,
+                                   unsigned srcReg);
+ 
+     uint64_t        findNextStackLoad(uint64_t addr,
+                                       uint64_t end,
+                                       unsigned fpOffset);
+     
+     std::vector<unsigned>* m_pCurrSnippet;       
+     OutputToInputRegMap    m_outputToInputReg;   // Maps output register -> input register
+ 
+     // Branch-always (annul bit high) instruction base (i.e., address not filled in yet)
+     static const unsigned BRANCH_ALWAYS_BASE_ANNUL;
+ 
+     // Branch-always (annul bit low) instruction base (i.e., address not filled in yet)
+     static const unsigned BRANCH_ALWAYS_BASE;
+ 
+     // NOP instruction
+     static const unsigned NOP_INST;
+ 
+     // Size (in number of 64-bit words) required for storing shared registers
+     static const unsigned SHARED_SIZE = 7;
+ 
+     // Sparc-specific constant used in SP manipulations
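+     // (On SparcV9, %sp holds the actual frame address minus 2047, so
+     // stack-relative accesses must add the bias back in.)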
+     static const unsigned BIAS;
+ 
+     // Memory region into which to spill shared registers when executing a phase 4 slot
+     // (i.e., the slot that invokes the phase4 function, the slot written by phase 3
+     // invocations).  NB: One region is sufficient and we do not need stack semantics
+     // because only one activation of a phase 4 slot ever occurs at a given time (assuming
+     // single-threaded execution).
+ 
+     static uint64_t                     sm_phase3SpillRegion[SHARED_SIZE];
+ };
+ 
+ unsigned SparcInstManip::getBranchAlways(uint64_t dest, uint64_t pc, bool annul) const
+ {
+     // dest is the destination address, pc is the value of the program counter when the
+     // branch instruction is executed (i.e., the address of the branch instruction). The
+     // annul flag selects between the annul-bit-high and annul-bit-low encodings.
+ 
+     return getUndepJumpInstr(annul ? BRANCH_ALWAYS_BASE_ANNUL : BRANCH_ALWAYS_BASE,
+                              dest,
+                              pc);
+ }
+ 
+ unsigned SparcInstManip::getCallInst(uint64_t dest, uint64_t pc) const
+ {
+     // dest is the destination address to call, pc is the value of the program counter
+     // when the call instruction is executed (i.e., the address of the call
+     // instruction).
+ 
+     return getCallInstr(dest, pc);
+ }
+ 
+ bool SparcInstManip::isBranch(unsigned inst) const
+ {
+     return ::isBranchInstr(inst);
+ }
+ 
+ unsigned SparcInstManip::getGenAddressCopySize(unsigned loadInst) const
+ {
+     // Determine the number of instructions required to load the address value used by the
+     // load instruction into some register.
+ 
+     // Case 1: load is immediate-valued --> add-immediate instruction needed, size is 1 inst
+     // Case 2: load is register-valued --> add-registers instruction needed, size is 1 inst
+ 
+     return 1;
+ }
+ 
+ #endif // _INCLUDED_SPARCINSTMANIP_H
+ 
+ 


Index: llvm/lib/Reoptimizer/Inst/InstManip.cpp
diff -u llvm/lib/Reoptimizer/Inst/InstManip.cpp:1.9 llvm/lib/Reoptimizer/Inst/InstManip.cpp:1.10
--- llvm/lib/Reoptimizer/Inst/InstManip.cpp:1.9	Tue Apr 29 21:08:42 2003
+++ llvm/lib/Reoptimizer/Inst/InstManip.cpp	Tue Apr 29 22:08:03 2003
@@ -6,17 +6,10 @@
 
 #include <iostream>
 #include <iomanip>
+
 #include "llvm/Reoptimizer/VirtualMem.h"
-#include "llvm/Reoptimizer/BinInterface/sparc9.h"
-#include "llvm/Reoptimizer/BinInterface/bitmath.h"
 #include "InstManip.h"
 
-const unsigned InstManip::NOP_INST = 0x01000000;
-const unsigned InstManip::BRANCH_ALWAYS_BASE = 0x10480000;
-const unsigned InstManip::BRANCH_ALWAYS_BASE_ANNUL = 0x30480000;
-const unsigned InstManip::BIAS = 2047;
-uint64_t InstManip::sm_phase3SpillRegion[InstManip::SHARED_SIZE];
-
 using std::cout;
 using std::cerr;
 using std::endl;
@@ -39,7 +32,8 @@
     for(std::vector<std::pair<uint64_t, unsigned> >::const_iterator i =
             m_insts.begin(), e = m_insts.end(); i != e; ++i) {
         ostr << std::hex << "  (" << i->first << ", " << std::flush;
-        sparc_print(i->second);
+        m_pIM->printInst(i->second);
+        //sparc_print(i->second); // FIXME
         fflush(stdout);
         ostr << ")" << endl;
     }
@@ -47,493 +41,11 @@
 }
 
 InstManip::InstManip(VirtualMem* vm):
-    m_pVM(vm),
-    m_pCurrSnippet(0)
+    m_pVM(vm)
 {
     assert(vm && "InstManip requires valid VirtualMem instance");
-
-    // Populate logical->actual register map. Since this InstManip class is
-    // SparcV9-specific, we map to the values used by the BinInterface library and macros.
-
-    m_logicalToActualReg[REG_0] = R_O0;
-    m_logicalToActualReg[REG_1] = R_O1;
-    m_logicalToActualReg[REG_2] = R_O2;
-
-    // Populate output->input register map. This is SparcV9 specific and corresponds to
-    // the register mapping that occurs after a 'save' instruction is issued. Shared and
-    // local registers map to themselves.
-
-    m_outputToInputReg[R_O0] = R_I0;
-    m_outputToInputReg[R_O1] = R_I1;
-    m_outputToInputReg[R_O2] = R_I2;
-    m_outputToInputReg[R_O3] = R_I3;
-    m_outputToInputReg[R_O4] = R_I4;
-    m_outputToInputReg[R_O5] = R_I5;
-    m_outputToInputReg[R_O6] = R_I6;
-    m_outputToInputReg[R_O7] = R_I7;
-
-    for(unsigned i = R_G0; i <= R_G7; ++i)
-        m_outputToInputReg[i] = i;
-    for(unsigned i = R_L0; i <= R_L7; ++i)
-        m_outputToInputReg[i] = i;
-}
-
-void InstManip::printRange(unsigned* start,
-                           unsigned* end) const
-{
-    // Dumps contents (and corresponding disassembly) of memory range given by range
-    // to stdout.  TODO: Parameterize by an ostream instance; cannot do this yet
-    // because BinInterface is hard-coded to use printf and must be changed.
-        
-    cout << "Sparc dissassembly of range ["
-              << start << ", " << end << "]:" << endl;
-
-    for(; start <= end; ++start) {
-        cout << start << " | " 
-                  << std::hex << std::setw(8) << std::setfill('0')
-                  << *start << " | ";
-        sparc_print(*start);
-        cout << endl;
-    }
-}
-
-uint64_t InstManip::skipFunctionHdr(uint64_t addr) const
-{
-    // For SparcV9, what we're calling the "function header" is the save instruction (if
-    // present) that occurs as the first instruction of the function.
-    
-    unsigned inst = m_pVM->readInstrFrmVm(addr);
-    assert(RD_FLD(inst, INSTR_OP) == OP_2 &&
-           RD_FLD(inst, INSTR_OP3) == OP3_SAVE &&
-           "Unhandled case: non-save instruction in function header");
-    
-    return addr + getInstWidth();
-}
-
-void InstManip::generateLoad(uint64_t value,
-                             LogicalRegister dest,
-                             LogicalRegister tmp)
-{
-    // When reg == REG_0, load the 64-bit value into %o0, using %o0 and %o1.
-    // When reg == REG_1, load the 64-bit value into %o1, using %o1 and %o2.
-    // The sequence of instructions is placed into the provided instruction vector.
-
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    assert(dest != tmp && "Distinct logical registers required");
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-    
-    unsigned initSize = snippet.size();
-    unsigned destReg = m_logicalToActualReg[dest];
-    unsigned tmpReg = m_logicalToActualReg[tmp];
-    
-    // sethi (upper 22b of upper wrd), %destReg
-    snippet.push_back(MK_SETHI(destReg, HIGH22(HIGHWORD(value))));
-
-    // or %o0, (lower 10b of upper wrd), %destReg
-    snippet.push_back(MK_LOGIC_IMM(OP3_OR, destReg, destReg, LOW10(HIGHWORD(value))));
-
-    // sllx %o0, 32, %destReg
-    snippet.push_back(MK_SHIFTX(OP3_SLL, destReg, destReg, 32));
-
-    // sethi (upper 22b of lwr wrd), %tmpReg
-    snippet.push_back(MK_SETHI(tmpReg, HIGH22(LOWWORD(value))));
-
-    // or %destReg, %tmpReg, %destReg
-    snippet.push_back(MK_LOGIC(OP3_OR, destReg, destReg, tmpReg));
-
-    // add %destReg, (lwr 10b of lwr wrd), %destReg
-    snippet.push_back(MK_ADD_R_I(destReg, destReg, LOW10(LOWWORD(value))));
-
-    assert(snippet.size() - initSize == getGenLoadSize() &&
-           "Unexpected number of instructions in code sequence for 64-bit value -> %dest");
-}
-
-void InstManip::generateAddressCopy(unsigned loadInst,
-                                    LogicalRegister dest,
-                                    bool afterSave)
-{
-    // NB: After save instruction has been issued, the output registers are mapped to the
-    // input registers.  
-
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-
-    unsigned initSize = snippet.size();
-    unsigned destReg = m_logicalToActualReg[dest];
-    unsigned rs1 = RD_FLD(loadInst, INSTR_RS1);
-
-    if(afterSave)
-        rs1 = m_outputToInputReg[rs1];
-
-    if(RD_FLD(loadInst, INSTR_I)) {
-        // Case 1: load is immediate-valued --> reg, imm value add instruction needed
-        unsigned imm = RD_FLD(loadInst, INSTR_SIMM13);
-        snippet.push_back(MK_ADD_R_I(destReg, rs1, imm));
-    }
-    else {
-        // Case 2: load is register-valued --> reg, reg add instruction needed
-        unsigned rs2 = RD_FLD(loadInst, INSTR_RS2);
-
-        if(afterSave)
-            rs2 = m_outputToInputReg[rs2];
-        
-        snippet.push_back(MK_ADD_R_R(destReg, rs1, rs2));
-    }
-
-    assert(snippet.size() - initSize == getGenAddressCopySize(loadInst) &&
-           "Unexpected number of instructions in code sequence for address copy");
-}
-
-void InstManip::generateParamStore(LogicalRegister src,
-                                   StackOffset off)
-{
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-
-    unsigned initSize = snippet.size();
-    unsigned srcReg = m_logicalToActualReg[src];
-
-    snippet.push_back(MK_STX_STACK(srcReg, BIAS + off));
-
-    assert(snippet.size() - initSize == getGenParamStoreSize() &&
-           "Unexpected number of instructions in code sequence for parameter store");
-}
-
-void InstManip::generateCall(uint64_t dest,
-                             uint64_t slotBase)
-{
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-
-    unsigned initSize = snippet.size();
-    
-    // Calculate address of call instruction from slotBase
-    uint64_t callInstAddr = slotBase + getInstWidth() * snippet.size();
-
-    // Add call instruction and nop (for call delay slot) to code snippet.
-    snippet.push_back(getCallInst(dest, callInstAddr));
-    snippet.push_back(NOP_INST);
-
-    assert(snippet.size() - initSize == getGenCallSize() &&
-           "Unexpected number of instructions in code sequence for call");
-}
-
-unsigned InstManip::getRestoreInst() const
-{
-    // restore %g0, 0, %g0
-    return MK_RESTORE_IMM(R_G0, R_G0, 0);
-}
-
-void InstManip::generateRestore()
-{
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-
-    unsigned initSize = snippet.size();
-
-    snippet.push_back(getRestoreInst());
-
-    assert(snippet.size() - initSize == getGenRestoreSize() &&
-           "Unexpected number of instructions in code sequence for restore");
-}
-
-void InstManip::generateSave()
-{
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-
-    unsigned initSize = snippet.size();    
-
-    // save %sp, -176, %sp
-    snippet.push_back(MK_SAVE_IMM(R_O6, R_O6, -176));
-
-    assert(snippet.size() - initSize == getGenSaveSize() &&
-           "Unexpected number of instructions in code sequence for save");
-}
-
-// TODO: It will be worthwhile to generate calls to functions that spill/restore the
-// shared registers instead of dumping all of the code into the current snippet.
-
-void InstManip::generateRestoreShared(uint64_t restoreFromAddr,
-                                      LogicalRegister tmp1,
-                                      LogicalRegister tmp2) 
-{
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    assert(tmp1 != tmp2 && "Distinct logical registers required");
-
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-    unsigned initSize = snippet.size();
-    unsigned tmpReg = m_logicalToActualReg[tmp1];
-
-    generateLoad(restoreFromAddr, tmp1, tmp2);
-    snippet.push_back(MK_LOAD_IMM(R_G1, tmpReg, 8));
-    snippet.push_back(MK_LOAD_IMM(R_G2, tmpReg, 16));
-    snippet.push_back(MK_LOAD_IMM(R_G3, tmpReg, 24));
-    snippet.push_back(MK_LOAD_IMM(R_G4, tmpReg, 32));
-    snippet.push_back(MK_LOAD_IMM(R_G5, tmpReg, 40));
-    snippet.push_back(MK_LOAD_IMM(R_G6, tmpReg, 48));
-    snippet.push_back(MK_LOAD_IMM(R_G7, tmpReg, 56));
-
-    assert(snippet.size() - initSize == getGenRestoreSharedSize() &&
-           "Unexpected number of instructions in code sequence for restore shared");
 }
 
-void InstManip::generateSpillShared(uint64_t spillToAddr,
-                                    LogicalRegister tmp1,
-                                    LogicalRegister tmp2) 
+InstManip::~InstManip()
 {
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    assert(tmp1 != tmp2 && "Distinct logical registers required");
-
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-    unsigned initSize = snippet.size();    
-    unsigned tmpReg = m_logicalToActualReg[tmp1];
-
-    generateLoad(spillToAddr, tmp1, tmp2);
-    snippet.push_back(MK_STORE_IMM(R_G1, tmpReg, 8));
-    snippet.push_back(MK_STORE_IMM(R_G2, tmpReg, 16));
-    snippet.push_back(MK_STORE_IMM(R_G3, tmpReg, 24));
-    snippet.push_back(MK_STORE_IMM(R_G4, tmpReg, 32));
-    snippet.push_back(MK_STORE_IMM(R_G5, tmpReg, 40));
-    snippet.push_back(MK_STORE_IMM(R_G6, tmpReg, 48));
-    snippet.push_back(MK_STORE_IMM(R_G7, tmpReg, 56));
-
-    assert(snippet.size() - initSize == getGenSpillSharedSize() &&
-           "Unexpected number of instructions in code sequence for spill shared");
-}
-
-void InstManip::generateBranchAlways(uint64_t dest,
-                                     uint64_t slotBase,
-                                     unsigned delaySlotInstr)
-{
-    assert(m_pCurrSnippet && "Invalid snippet for code generation");
-    std::vector<unsigned>& snippet = *m_pCurrSnippet;
-
-    unsigned initSize = snippet.size();
-    
-    // Calculate address of branch instruction from slotBase
-    uint64_t branchInstAddr = slotBase + getInstWidth() * snippet.size();
-
-    // Add branch instruction and the specified delay slot instruction to code snippet.
-    snippet.push_back(getBranchAlways(dest, branchInstAddr, false)); // annul bit low
-    snippet.push_back(delaySlotInstr);
-
-    assert(snippet.size() - initSize == getGenBranchAlwaysSize() &&
-           "Unexpected number of instruction in code sequence for branch-always");
-}
-
-void InstManip::findCandidates(uint64_t start,
-                               uint64_t end,
-                               std::vector<InstCandidate>& candidates) 
-{
-    for(uint64_t currAddr = start; currAddr <= end; currAddr += getInstWidth()) {
-        InstCandidate cand;
-        if(isCandidateLoad(currAddr, end, cand))
-            candidates.push_back(cand);
-    }
-}
-
-static inline bool isLoadHalfWord(unsigned inst)
-{
-    // Returns true if inst is an LDUH instruction
-    return RD_FLD(inst, INSTR_OP) == OP_3 &&
-        RD_FLD(inst, INSTR_OP3) == OP3_LDUH;
-}
-
-static inline bool isLoadByte(unsigned inst) 
-{
-    // Returns true if inst is a LDUB instruction
-    return RD_FLD(inst, INSTR_OP) == OP_3 &&
-        RD_FLD(inst, INSTR_OP3) == OP3_LDUB;
-}
-
-static inline bool isFPRelative(unsigned inst) 
-{
-    return RD_FLD(inst, INSTR_RS1) == R_BP && RD_FLD(inst, INSTR_I) == 1;
-}
-
-static inline bool isSTH(unsigned inst) 
-{
-    return RD_FLD(inst, INSTR_OP) == OP_3 &&
-        RD_FLD(inst, INSTR_OP3) == OP3_STH;
-}
-
-static inline bool isSTB(unsigned inst) 
-{
-    return RD_FLD(inst, INSTR_OP) == OP_3 &&
-        RD_FLD(inst, INSTR_OP3) == OP3_STB;
-}
-
-static inline unsigned getLoadDest(unsigned inst) 
-{
-    // Assumes that inst is a load instruction, and returns the register ID of its
-    // destination operand.
-
-    return RD_FLD(inst, INSTR_RD);
-}
-
-static inline unsigned getStoreSrc(unsigned inst)
-{
-    // Assumes that inst is a stb/sth instruction, and returns the register ID of its
-    // source operand (by source, we don't mean rs1 or rs2, but rather rd, which specifies
-    // the register which contains the value being stored); 
-
-    return RD_FLD(inst, INSTR_RD);
-}
-
-static inline unsigned getFPOffset(unsigned inst) 
-{
-    assert(isFPRelative(inst) && "Expect instruction to be FP-relative");
-    return RD_FLD(inst, INSTR_SIMM13);
-}
-
-bool InstManip::determineSchema(InstCandidate& cand,
-                                uint64_t end,
-                                std::pair<uint64_t, unsigned>& load,
-                                std::pair<uint64_t, unsigned>& store)
-{
-    // inst1 contains the load instruction (the actual candidate). inst2 contains the
-    // corresponding store instruction, which is either STB or STH.  If STB, take actions
-    // for schema 1, and if STH, schema 2.
-    
-    if(isSTB(store.second)) {
-        // Schema 1: "direct" pattern
-        cand.setType(InstCandidate::DIRECT);
-        cand.push_back(load);
-        cand.push_back(store);
-        return true;
-    }
-    else {
-        assert(isSTH(store.second) && "Instruction must be STH");
-
-        // We have potentially discovered an instance of schema 2, but must search
-        // more to determine if this is the case.
-        // 
-        // KIS heuristic concession: The STH given by storeInst *must* be storing to the stack
-        // in an fp-relative manner; if not, we deny the originating load's candidacy.
-                
-        if(isFPRelative(store.second)) {
-            // Search forward until a LDUB from same stack location (+1) as the STH wrote to
-            // is encountered.  The +1 in specified in the FP offset we're searching for is
-            // due to the fact that we stored a half-word but are loading a byte.
-
-            if(uint64_t stkLoadAddr = findNextStackLoad(store.first, end, getFPOffset(store.second) + 1)) {
-                // Last schema-2 search: find the STB instruction that stores from the
-                // LDUB's destination register.
-                        
-                unsigned ldubInst = m_pVM->readInstrFrmVm(stkLoadAddr);
-                uint64_t stbAddr = findNextStore(stkLoadAddr, end, getLoadDest(ldubInst));
-                unsigned stbInst;
-
-                if(stbAddr && isSTB((stbInst = m_pVM->readInstrFrmVm(stbAddr)))) {
-                            
-                    // All of the criteria have been met for Schema 2, the "stack transfer"
-                    // pattern.
-                        
-                    cand.setType(InstCandidate::STACK_XFER);
-                    cand.push_back(load);
-                    cand.push_back(store);
-                    cand.push_back(stkLoadAddr, ldubInst);
-                    cand.push_back(stbAddr, stbInst);
-                    return true;
-                }
-            }
-        }
-    }
-
-    return false;
-}
-
-bool InstManip::isCandidateLoad(uint64_t addr,
-                                uint64_t end,
-                                InstCandidate& cand) 
-{
-    // {{{ Description of heuristic
-
-    // A candidate load is the first instruction in a sequence (with an arbitrary number
-    // of instructions in between elements of this sequence) that is a "signature" for the
-    // particular load of a volatile variable which needs to be replaced with a call to an
-    // instrumentation function.
-    //
-    // Detecting this candidacy condition is accomplished via the application of a
-    // relatively simple heurstic.  The signature sequence always begins with a "load
-    // half-word" and ends with a "store byte".  However, we cannot guarantee that the
-    // sequence looks like:
-    //
-    // lduh [mem1], %r[d]              |
-    // ...                             |  "Schema 1"
-    // stb %r[d], [mem2]               | 
-    //
-    // although this is a perfectly valid pattern to look for.  However, unoptimized code
-    // will frequently transfer this data using the stack, as in this instruction sequence:
-    //
-    // lduh [mem1] %r[d]               |  
-    // ...                             |
-    // sth  %r[d], [stack loc]         |
-    // ...                             |  "Schema 2"
-    // lduh [stack loc], %r[d']        |
-    // ...                             |
-    // stb %r[d'], [mem2]              |
-    //
-    // The current heurstic catches both of these patterns (designated "direct" and "stack
-    // transfer" respectively), and will be extended as insufficiencies in the heuristic
-    // are revealed.
-
-    // }}}
-    
-    // Address of potential candidate load is given by 'addr', maximum search address is
-    // given by 'end'
-    
-    unsigned inst = m_pVM->readInstrFrmVm(addr);
-    
-    if(isLoadHalfWord(inst)) {
-        // Search forward until a sth/stb from inst's target register is encountered
-        if(uint64_t storeAddr = findNextStore(addr, end, getLoadDest(inst))) {
-
-            // If STB, take actions for schema 1, otherwise check for schema 2 conditions.
-
-            unsigned storeInst = m_pVM->readInstrFrmVm(storeAddr);
-            std::pair<uint64_t, unsigned> inst1(addr, inst);
-            std::pair<uint64_t, unsigned> inst2(storeAddr, storeInst);
-
-            return determineSchema(cand, end, inst1, inst2);
-        }
-    }
-    
-    return false;
-}
-
-uint64_t InstManip::findNextStackLoad(uint64_t addr,
-                                      uint64_t end,
-                                      unsigned fpOffset)
-{
-    // Sweep the range of addresses starting at addr, up to end, looking for a load byte
-    // that is loading from [%fp + fpOffset]. Return the first such instance, or 0 is such
-    // an instance cannot be found.
-
-    for(uint64_t currAddr = addr; currAddr <= end; currAddr += getInstWidth()) {
-        unsigned inst = m_pVM->readInstrFrmVm(currAddr);
-
-        if(isLoadByte(inst) && isFPRelative(inst) && getFPOffset(inst) == fpOffset)
-            return currAddr;
-    }
-    
-    return 0;
-}
-
-uint64_t InstManip::findNextStore(uint64_t addr,
-                                  uint64_t end,
-                                  unsigned srcReg) 
-{
-    // Sweep the range of addresses starting at addr, up to end, looking for stb or sth
-    // instructions that are storing _from_ 'fromReg'.  Return the first such instance, or
-    // 0 if such an instance cannot be found.
-
-    for(uint64_t currAddr = addr; currAddr <= end; currAddr += getInstWidth()) {
-        unsigned inst = m_pVM->readInstrFrmVm(currAddr);
-        if(isSTH(inst) || isSTB(inst) && getStoreSrc(inst) == srcReg)
-            return currAddr;
-    }
-
-    return 0;
 }


Index: llvm/lib/Reoptimizer/Inst/InstManip.h
diff -u llvm/lib/Reoptimizer/Inst/InstManip.h:1.10 llvm/lib/Reoptimizer/Inst/InstManip.h:1.11
--- llvm/lib/Reoptimizer/Inst/InstManip.h:1.10	Tue Apr 29 21:08:42 2003
+++ llvm/lib/Reoptimizer/Inst/InstManip.h	Tue Apr 29 22:08:03 2003
@@ -2,24 +2,63 @@
 // programmer: Joel Stanley
 //       date: Tue Apr  8 22:42:14 CDT 2003
 //     fileid: InstManip.h
-//     purpose: InstManip is a wrapper class around any BinInterface macros/mechanisms, as
-//     well as the TraceCache "instruction utilities", all which are (currently)
-//     SparcV9-specific.  This class exists both for conceptual clarity and to facilitate
-//     the hiding of Sparc-specific code from the Phase 2-4 actions (and thus making it
-//     easier to use the transformations on other platforms in the future; we should be
-//     able to change which instruction manipulator object is instantiated, after making
-//     the appropriate superclass, etc). 
+//     purpose: InstManip is a (pure virtual) class that hides platform-specific
+//     instruction manipulation behind a common interface, and provides clients with
+//     various instruction manipulation utilities. Only two relevant assumptions are made:
+//
+//     * The TraceCache objects (TraceCache, MemoryManager, VirtualMem, etc) from the
+//     Reoptimizer library work in an appropriate manner on the given platform.
+//
+//     * uint64_t is used for addresses, and unsigned is used for instruction words.
+//
+//     Better parameterization of type attributes (perhaps by making it a template class?)
+//     is on the TODO list.  This is currently difficult because the aforementioned
+//     Reoptimizer classes are not parameterized.
 
 #ifndef _INCLUDED_INSTMANIP_H
 #define _INCLUDED_INSTMANIP_H
 
 #include <vector>
 #include <algorithm>
+#include <map>
-#include "llvm/Reoptimizer/BinInterface/sparcdis.h"
-#include "llvm/Reoptimizer/InstrUtils.h" // getCallInstr, getUndepJumpInstr, etc.
 
 class VirtualMem;
 
+class InstManip 
+{
+  public:
+    InstManip(VirtualMem* vm);
+    virtual ~InstManip();
+
+    // Logical registers used by clients of this class, mapped to machine-specific IDs
+    // by the logical -> actual register map.
+    enum LogicalRegister {
+        REG_0,
+        REG_1,
+        REG_2
+    };
+
+    virtual void     printRange(unsigned* start, unsigned* end) const = 0;
+    virtual void     printInst(unsigned inst) const = 0;
+    virtual unsigned getInstWidth() const = 0;
+
+    inline void      printRange(uint64_t start, uint64_t end) const;
+
+  protected:
+    InstManip() {}
+    
+    typedef std::map<LogicalRegister, unsigned> LogicalToActualRegMap;
+
+    LogicalToActualRegMap  m_logicalToActualReg; // Maps logical -> actual register
+    VirtualMem*            m_pVM;
+};
+
+void InstManip::printRange(uint64_t start, uint64_t end) const
+{
+    printRange((unsigned*) start, (unsigned*) end);
+}
+
+////////////////
+
 // InstCandidate is a class that represents a location in the code that is determined to
 // be a candidate for instrumentation.  Because the transformation action required for a
 // particular candidate requires auxiliary information (such as other instructions found
@@ -31,8 +70,9 @@
   public:
     enum CandType { DIRECT, STACK_XFER };
     
-    InstCandidate() {}
-    InstCandidate(CandType type): m_type(type) {}
+    InstCandidate(): m_pIM(0) {}
+    InstCandidate(InstManip* pIM): m_pIM(pIM) {}
+    InstCandidate(InstManip* pIM, CandType type): m_pIM(pIM), m_type(type) {}
 
     void setType(CandType type) { m_type = type;               }
     bool isDirect() const       { return m_type == DIRECT;     }
@@ -66,203 +106,15 @@
     void print(std::ostream& ostr) const;
 
   protected:
-    CandType m_type;
+    InstManip* m_pIM;
+    CandType   m_type;
 
     // Each element of this vector holds a (address, inst) pair.
     std::vector<std::pair<uint64_t, unsigned> > m_insts;
-};
-
-std::ostream& operator<<(std::ostream& ostr, const InstCandidate& cand);
-
-class InstManip 
-{
-  public:
-    InstManip(VirtualMem* vm);
-
-    typedef std::pair<uint64_t, unsigned> Inst; // (location, inst word) pair
-    
-    // Logical registers used by clients of this class, mapped to machine-specific IDs
-    // by the logical -> actual register map.
-    enum LogicalRegister {
-        REG_0,
-        REG_1,
-        REG_2
-    };
 
-    // Offsets in stack frame for function parameters
-    enum StackOffset {
-        PARAM_0 = 128,
-        PARAM_1 = 136
-    }; 
-    
-    void            printRange(unsigned* start, unsigned* end) const;
-    inline void     printRange(uint64_t start, uint64_t end) const;
-                    
-    inline void     printInst(unsigned inst) const;
-    inline void     printInst(unsigned* instAddr) const;
-                    
-    uint64_t        skipFunctionHdr(uint64_t addr) const;
-                    
-    void            startCode(std::vector<unsigned>& snippet) { m_pCurrSnippet = &snippet; }
-    void            endCode()                                 { m_pCurrSnippet = 0;        }
-
-    void            generateAddressCopy(unsigned loadInst,
-                                        LogicalRegister dest,
-                                        bool afterSave);
-
-    void            generateBranchAlways(uint64_t dest,
-                                         uint64_t slotBase,
-                                         unsigned delaySlotInstr = NOP_INST);
-
-    void            generateCall(uint64_t dest, uint64_t slotBase);
-
-    void            generateLoad(uint64_t value,
-                                 LogicalRegister dest,
-                                 LogicalRegister tmp);
-
-    void            generateParamStore(LogicalRegister src, StackOffset off);
-
-    void            generateRestore();
-    void            generateSave();
-
-    void            generateRestoreShared(uint64_t restoreFromAddr,
-                                          LogicalRegister tmp1 = REG_0,
-                                          LogicalRegister tmp2 = REG_1);
-
-    void            generateSpillShared(uint64_t spillFromAddr,
-                                        LogicalRegister tmp1 = REG_0,
-                                        LogicalRegister tmp2 = REG_1);
-
-    void            findCandidates(uint64_t start,
-                                   uint64_t end,
-                                   std::vector<InstCandidate>& candidates);
-
-    unsigned        getRestoreInst() const;
-    inline unsigned getBranchAlways(uint64_t dest, uint64_t pc, bool annulHigh = true) const;
-    inline unsigned getCallInst(uint64_t dest, uint64_t pc) const;
-    inline bool     isBranch(unsigned inst) const;
-
-    // These are functions so when InstManip is superclassed, they'd become virtual, etc.
-    // In the short term we could use class constants, but this is more clear.
-    
-    unsigned        getNOP() const                   { return NOP_INST;                       }
-    unsigned        getGenLoadSize() const           { return 6;                              }
-    unsigned        getGenCallSize() const           { return 2;                              }
-    unsigned        getGenBranchAlwaysSize() const   { return 2;                              }
-    unsigned        getGenSaveSize() const           { return 1;                              }
-    unsigned        getGenParamStoreSize() const     { return 1;                              }
-    unsigned        getGenSpillSharedSize() const    { return getGenLoadSize() + SHARED_SIZE; }
-    unsigned        getGenRestoreSharedSize() const  { return getGenLoadSize() + SHARED_SIZE; }
-    unsigned        getGenRestoreSize() const        { return 1;                              }
-    unsigned        getInstWidth() const             { return 4;                              }
-    unsigned        getSharedSize() const            { return SHARED_SIZE;                    }
-
-    inline unsigned getGenAddressCopySize(unsigned loadInst) const;
-
-    uint64_t getPhase3SpillAddr() { return (uint64_t) sm_phase3SpillRegion; }
-
-  private:
-    InstManip() {}
-    typedef std::map<LogicalRegister, unsigned> LogicalToActualRegMap;
-    typedef std::map<unsigned, unsigned>        OutputToInputRegMap;
-
-    bool            isCandidateLoad(uint64_t addr,
-                                    uint64_t end,
-                                    InstCandidate& cand);
-
-    bool            determineSchema(InstCandidate& cand,
-                                    uint64_t end,
-                                    std::pair<uint64_t, unsigned>& load,
-                                    std::pair<uint64_t, unsigned>& store);
-
-    uint64_t        findNextStore(uint64_t addr,
-                                  uint64_t end,
-                                  unsigned srcReg);
-
-    uint64_t        findNextStackLoad(uint64_t addr,
-                                      uint64_t end,
-                                      unsigned fpOffset);
-    
-    VirtualMem*            m_pVM;
-    std::vector<unsigned>* m_pCurrSnippet;       
-    LogicalToActualRegMap  m_logicalToActualReg; // Maps logical -> actual register 
-    OutputToInputRegMap    m_outputToInputReg;   // Maps output register -> input register
-
-    // Branch-always (annul bit high) instruction base (i.e., address not filled in yet)
-    static const unsigned BRANCH_ALWAYS_BASE_ANNUL;
-
-    // Branch-always (annul bit low) instruction base (i.e., address not filled in yet)
-    static const unsigned BRANCH_ALWAYS_BASE;
-
-    // NOP instruction
-    static const unsigned NOP_INST;
-
-    // Size (in number of 64-bit words) required for storing shared registers
-    static const unsigned SHARED_SIZE = 7;
-
-    // Sparc-specific constant used in SP manipulations
-    static const unsigned BIAS;
-
-    // Memory region into which to spill shared registers when executing a phase 4 slot
-    // (i.e., the slot that invokes the phase4 function, the slot written by phase 3
-    // invocations).  NB: One region is sufficient and we do not need stack semantics
-    // because only one activation of a phase 4 slot ever occurs at a given time (assuming
-    // single-threaded execution).
-
-    static uint64_t                     sm_phase3SpillRegion[SHARED_SIZE];
 };
 
-void InstManip::printRange(uint64_t start, uint64_t end) const
-{
-    printRange((unsigned*) start, (unsigned*) end);
-}
-
-void InstManip::printInst(unsigned inst) const
-{
-    sparc_print(inst);
-    fflush(stdout);
-}
-
-void InstManip::printInst(unsigned* instAddr) const
-{
-    sparc_print(*instAddr);
-    fflush(stdout);
-}
-
-unsigned InstManip::getBranchAlways(uint64_t dest, uint64_t pc, bool annul) const
-{
-    // dest is the destination address, pc is the value of the program counter when the
-    // branch instruction is executed (i.e., the address of the branch instruction).
-    // NB: only branch-always is generated; the 'annul' flag selects the annul bit.
-
-    return getUndepJumpInstr(annul ? BRANCH_ALWAYS_BASE_ANNUL : BRANCH_ALWAYS_BASE,
-                             dest,
-                             pc);
-}
-
-unsigned InstManip::getCallInst(uint64_t dest, uint64_t pc) const
-{
-    // dest is the destination address to call, pc is the value of the program counter
-    // when the call instruction is executed (i.e., the address of the call
-    // instruction).
-
-    return getCallInstr(dest, pc);
-}
-
-bool InstManip::isBranch(unsigned inst) const
-{
-    return ::isBranchInstr(inst);
-}
-
-unsigned InstManip::getGenAddressCopySize(unsigned loadInst) const
-{
-    // Determine the number of instructions required to load the address value used by the
-    // load instruction into some register.
-
-    // Case 1: load is immediate-valued --> add-immediate instruction needed, size is 1 inst
-    // Case 2: load is register-valued --> add-registers instruction needed, size is 1 inst
-
-    return 1;
-}
+std::ostream& operator<<(std::ostream& ostr, const InstCandidate& cand);
 
 #endif // _INCLUDED_INSTMANIP_H
+
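
For context: the branch helpers above splice a PC-relative displacement into
one of the BRANCH_ALWAYS_BASE* templates via BinInterface's getUndepJumpInstr.
A minimal sketch of the idea, assuming a SPARC V9 BPcc-style encoding with a
signed 19-bit word displacement in the low bits of the instruction; the actual
field layout is BinInterface's concern and is not part of this patch:

    #include <cassert>
    #include <stdint.h>

    // Sketch only: drop a word displacement into a branch-always template.
    static unsigned fillBranchDisp(unsigned base, uint64_t dest, uint64_t pc)
    {
        int64_t disp = ((int64_t) dest - (int64_t) pc) >> 2;   // bytes -> words
        assert(disp >= -(1 << 18) && disp < (1 << 18) &&
               "branch target out of displacement range");
        return base | ((unsigned) disp & 0x7ffff);             // disp19 field
    }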


Index: llvm/lib/Reoptimizer/Inst/Phases.cpp
diff -u llvm/lib/Reoptimizer/Inst/Phases.cpp:1.15 llvm/lib/Reoptimizer/Inst/Phases.cpp:1.16
--- llvm/lib/Reoptimizer/Inst/Phases.cpp:1.15	Tue Apr 29 21:08:42 2003
+++ llvm/lib/Reoptimizer/Inst/Phases.cpp	Tue Apr 29 22:08:03 2003
@@ -79,7 +79,8 @@
 #include "llvm/Reoptimizer/MemoryManager.h"
 
 #include "ElfReader.h"
-#include "InstManip.h"
+//#include "InstManip.h"
+#include "SparcInstManip.h"
 
 using std::vector;
 using std::cerr;
@@ -108,13 +109,15 @@
                uint64_t replaceAddr,
                uint64_t slotDescriptor,
                unsigned slotSize,
-               TraceCache* pTraceCache):
+               TraceCache* pTraceCache,
+               SparcInstManip* pInstManip):
         m_addrRange(addressRange),
         m_origInst(origInst),
         m_replaceAddr(replaceAddr),
         m_slotDescriptor(slotDescriptor),
         m_slotSize(slotSize),
-        m_pTraceCache(pTraceCache)
+        m_pTraceCache(pTraceCache),
+        m_pInstManip(pInstManip)
     {
     }
 
@@ -134,7 +137,8 @@
     uint64_t    getReplaceAddr() const { return m_replaceAddr;      }  
     uint64_t    getSlot() const        { return m_slotDescriptor;   }
     uint64_t    getSlotSize() const    { return m_slotSize;         }
-    TraceCache* getTraceCache()        { return m_pTraceCache;      }   
+    TraceCache* getTraceCache()        { return m_pTraceCache;      }
+    SparcInstManip* getIM()            { return m_pInstManip;       }
 
   private:
     Phase3Info() {}
@@ -145,6 +149,7 @@
     uint64_t     m_slotDescriptor; // Slot created by phase 2
     unsigned     m_slotSize;       // Size of slot created by phase 2
     TraceCache*  m_pTraceCache;    // TraceCache instance used by phase 2
+    SparcInstManip* m_pInstManip;  // The InstManip instance to pass to the next phase
 };
 
 class Phase4Info
@@ -153,18 +158,21 @@
     Phase4Info(const InstCandidate& candidate,
                uint64_t slotDescriptor,
                uint64_t slotSize,
-               TraceCache* pTraceCache):
+               TraceCache* pTraceCache,
+               SparcInstManip* pInstManip):
         m_candidate(candidate),
         m_slotDescriptor(slotDescriptor),
         m_slotSize(slotSize),
-        m_pTraceCache(pTraceCache)
+        m_pTraceCache(pTraceCache),
+        m_pInstManip(pInstManip)
     {
     }
 
     const InstCandidate& getCandidate() const { return m_candidate;        }
     uint64_t             getSlot() const      { return m_slotDescriptor;   }
     uint64_t             getSlotSize() const  { return m_slotSize;         }
-    TraceCache*          getTraceCache()      { return m_pTraceCache;      }   
+    TraceCache*          getTraceCache()      { return m_pTraceCache;      }
+    SparcInstManip*      getIM()              { return m_pInstManip;       }
 
   private:
     Phase4Info() {}
@@ -173,6 +181,7 @@
     uint64_t      m_slotDescriptor; // Slot created by phase 3
     unsigned      m_slotSize;       // Size of slot created by phase 3
     TraceCache*   m_pTraceCache;    // TraceCache instance used by phases 2 and 3
+    SparcInstManip* m_pInstManip;   // The InstManip instance to pass to the next phase
 };
 
 void phase3(Phase3Info* p3info);
@@ -184,16 +193,16 @@
 class Phase2 
 {
   public:
-    Phase2(TraceCache* pTraceCache);
+    Phase2(TraceCache* pTraceCache, SparcInstManip* pIM);
     void transform();
     void transformFunction(AddressRange& range);
 
   private:
-    Phase2(): m_instManip(0) {}
+    Phase2() {}
     inline unsigned getSlotSize() const;
     
     TraceCache*     m_pTraceCache;
-    InstManip       m_instManip;
+    SparcInstManip* m_pInstManip;
 
     static uint64_t* sm_pSpillRegion; // Base pointer to the spill region for phase 3 invocations
     static uint64_t* sm_pCurrSpill;   // Pointer to current location in the spill region
@@ -215,14 +224,14 @@
     void transform();
 
   private:
-    Phase3(): m_instManip(0) {}
+    Phase3() {}
 
     void            processCandidates(vector<InstCandidate>& candidates);
     inline unsigned getSlotSize(InstCandidate&) const;
 
     Phase3Info* m_pPhase3Info;
     TraceCache* m_pTraceCache;
-    InstManip   m_instManip;
+    SparcInstManip* m_pInstManip;
 };
 
 // Phase4 is the class that is responsible for making the "phase 4" transformation; the
@@ -238,13 +247,13 @@
     void transform();
 
   private:
-    Phase4(): m_instManip(0) {}
+    Phase4() {}
 
     inline unsigned getSlotSize() const;
 
     Phase4Info* m_pPhase4Info;
     TraceCache* m_pTraceCache;
-    InstManip   m_instManip;
+    SparcInstManip* m_pInstManip;
     uint64_t    m_tag;         // Entry to look for in the GBT
 };
 
@@ -252,13 +261,15 @@
 
 extern "C" void phase2() 
 {
-    Phase2 ph(new TraceCache());
+    TraceCache* pTC = new TraceCache();
+    SparcInstManip* pIM = new SparcInstManip(pTC->getVM());
+    Phase2 ph(pTC, pIM);
     ph.transform();
 }
 
-Phase2::Phase2(TraceCache* tc):
+Phase2::Phase2(TraceCache* tc, SparcInstManip* pInstManip):
     m_pTraceCache(tc),
-    m_instManip(tc->getVM())
+    m_pInstManip(pInstManip)
 {
 }
 
@@ -288,7 +299,7 @@
     // invocations.  We allocate one unit of space (given by InstManip::getSharedSize())
     // for each function that we transform.
 
-    sm_pSpillRegion = new uint64_t[m_instManip.getSharedSize() * funcs.size()];
+    sm_pSpillRegion = new uint64_t[m_pInstManip->getSharedSize() * funcs.size()];
     sm_pCurrSpill = sm_pSpillRegion;
 
     for(vector<std::pair<std::string, AddressRange> >::iterator i = funcs.begin(),
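
To make the spill-region bookkeeping concrete, here is an illustrative sketch
of the allocation performed above (sharedSize, numFuncs, and the pointer names
are stand-ins for getSharedSize(), funcs.size(), and the sm_* statics):

    #include <stdint.h>

    const unsigned sharedSize = 7;        // SHARED_SIZE, per the removed header
    const unsigned numFuncs   = 3;        // e.g., three functions to transform
    uint64_t* spillRegion = new uint64_t[sharedSize * numFuncs];
    uint64_t* currSpill   = spillRegion;
    // ... after each function is transformed, advance to the next chunk:
    currSpill += sharedSize;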
@@ -320,49 +331,49 @@
     // Obtain the address of the first replaceable instruction in the function, and obtain a
     // new slot from the TraceCache memory manager (i.e., a new slot in the dummy function).
     
-    uint64_t repInstAddr = m_instManip.skipFunctionHdr(range.first);
+    uint64_t repInstAddr = m_pInstManip->skipFunctionHdr(range.first);
     uint64_t slotBase = m_pTraceCache->getMemMgr()->getMemory(getSlotSize());
     assert(slotBase && "Unable to obtain memory from MemoryManager instance");
 
     // Replace instruction at repInstAddr with a branch to start of slot.
     VirtualMem* vm = m_pTraceCache->getVM();
     unsigned origInst = vm->readInstrFrmVm(repInstAddr);
-    assert(!m_instManip.isBranch(origInst) &&
+    assert(!m_pInstManip->isBranch(origInst) &&
            "Unhandled case: branch instruction first in function body");
-    vm->writeInstToVM(repInstAddr, m_instManip.getBranchAlways(slotBase, repInstAddr));
+    vm->writeInstToVM(repInstAddr, m_pInstManip->getBranchAlways(slotBase, repInstAddr));
 
     // Generate the phase 3 slot. See picture of phase 3 slot contents for more info.
 
     Phase3Info* p3info = new Phase3Info(range, origInst, repInstAddr,
-                                        slotBase, getSlotSize(), m_pTraceCache);
+                                        slotBase, getSlotSize(), m_pTraceCache, m_pInstManip);
 
     vector<unsigned> snippet;
-    m_instManip.startCode(snippet);
+    m_pInstManip->startCode(snippet);
 
-    m_instManip.generateSave();
-    m_instManip.generateSpillShared((uint64_t) sm_pCurrSpill);
-    m_instManip.generateLoad((uint64_t) p3info, InstManip::REG_0, InstManip::REG_1);
-    m_instManip.generateCall((uint64_t) &phase3, slotBase);
-    m_instManip.generateRestoreShared((uint64_t) sm_pCurrSpill);
-    m_instManip.generateBranchAlways(repInstAddr, slotBase, m_instManip.getRestoreInst());
+    m_pInstManip->generateSave();
+    m_pInstManip->generateSpillShared((uint64_t) sm_pCurrSpill);
+    m_pInstManip->generateLoad((uint64_t) p3info, InstManip::REG_0, InstManip::REG_1);
+    m_pInstManip->generateCall((uint64_t) &phase3, slotBase);
+    m_pInstManip->generateRestoreShared((uint64_t) sm_pCurrSpill);
+    m_pInstManip->generateBranchAlways(repInstAddr, slotBase, m_pInstManip->getRestoreInst());
 
-    m_instManip.endCode();
+    m_pInstManip->endCode();
 
     // Dump snippet instructions:
     cerr << "phase3 slot instructions:" << endl;
     for(vector<unsigned>::iterator j = snippet.begin(), k = snippet.end(); j != k; ++j) {
-        m_instManip.printInst(*j);
+        m_pInstManip->printInst(*j);
         cerr << endl;
     }
 
     // Bump the current spill pointer to the next "spill slot" in the spill region used
     // before/after phase3() invocations.
 
-    sm_pCurrSpill += m_instManip.getSharedSize();
+    sm_pCurrSpill += m_pInstManip->getSharedSize();
 
     // Copy the snippet code into the slot
     assert(snippet.size() == getSlotSize() && "Snippet size does not match slot size");
-    copySnippetToSlot(snippet, slotBase, vm, m_instManip);
+    copySnippetToSlot(snippet, slotBase, vm, *m_pInstManip);
 }
 
 unsigned Phase2::getSlotSize() const
@@ -370,12 +381,12 @@
     // The following sum corresponds to the sizes consumed by the various regions of the
     // phase 2 slot.  See picture of phase 2 contents for details.
 
-    return m_instManip.getGenSaveSize() +
-        m_instManip.getGenSpillSharedSize() +
-        m_instManip.getGenLoadSize() +
-        m_instManip.getGenCallSize() +
-        m_instManip.getGenRestoreSharedSize() +
-        m_instManip.getGenBranchAlwaysSize();
+    return m_pInstManip->getGenSaveSize() +
+        m_pInstManip->getGenSpillSharedSize() +
+        m_pInstManip->getGenLoadSize() +
+        m_pInstManip->getGenCallSize() +
+        m_pInstManip->getGenRestoreSharedSize() +
+        m_pInstManip->getGenBranchAlwaysSize();
 }
 
 //////////////// Phase3 implementation ////////////////
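
For reference, plugging in the per-snippet sizes declared in the removed
InstManip.h block above (save = restore = 1, load = 6, call = 2, branch-always
= 2, spill/restore-shared = 6 + SHARED_SIZE = 13), the slot generated by
Phase2::transformFunction works out to

    1 + 13 + 6 + 2 + 13 + 2 = 37 instruction words = 148 bytes,

and the slot generated by Phase3::processCandidates below adds an address copy
(1) and two parameter stores (1 each), for 40 words. These sizes presumably
carry over unchanged to SparcInstManip.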
@@ -389,7 +400,7 @@
 Phase3::Phase3(Phase3Info* p3info):
     m_pPhase3Info(p3info),
     m_pTraceCache(p3info->getTraceCache()),
-    m_instManip(p3info->getTraceCache()->getVM())
+    m_pInstManip(p3info->getIM())
 {
     cerr << "================ Begin Phase 3 [" << std::hex
          << m_pPhase3Info->getStartAddr() << ", " << m_pPhase3Info->getEndAddr()
@@ -423,7 +434,7 @@
 static uint64_t replaceInstWithBrToSlot(uint64_t srcAddr,
                                         unsigned slotSize,
                                         TraceCache* tc,
-                                        InstManip& im) 
+                                        SparcInstManip& im) 
 {
     // Obtain a new slot of the given size
     uint64_t slotBase = tc->getMemMgr()->getMemory(slotSize);
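
The remainder of replaceInstWithBrToSlot is unchanged and therefore elided
from the diff. Judging from the identical replace-with-branch sequence in
Phase2::transformFunction above, the elided body presumably reads roughly as
follows (a reconstruction for the reader, not the actual source):

    VirtualMem* vm = tc->getVM();
    unsigned origInst = vm->readInstrFrmVm(srcAddr);
    assert(!im.isBranch(origInst) && "Unhandled case: candidate is a branch");
    vm->writeInstToVM(srcAddr, im.getBranchAlways(slotBase, srcAddr));
    return slotBase;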
@@ -445,16 +456,16 @@
 
         // Replace load candidate instruction with a branch to the start of a new slot.
         uint64_t slotBase = replaceInstWithBrToSlot(i->front().first, getSlotSize(*i),
-                                                    m_pTraceCache, m_instManip);
+                                                    m_pTraceCache, *m_pInstManip);
 
         // Generate the phase 4 slot. See picture of phase 4 slot contents for more info.
 
-        Phase4Info* p4info = new Phase4Info(*i, slotBase, getSlotSize(*i), m_pTraceCache);
+        Phase4Info* p4info = new Phase4Info(*i, slotBase, getSlotSize(*i), m_pTraceCache, m_pInstManip);
 
-        uint64_t spillAddr = m_instManip.getPhase3SpillAddr();
+        uint64_t spillAddr = m_pInstManip->getPhase3SpillAddr();
 
         vector<unsigned> snippet;
-        m_instManip.startCode(snippet);
+        m_pInstManip->startCode(snippet);
 
         // NB: We pass parameters to the phase4 function in REG_0 and REG_1 on the
         // assumption that the input parameters will be looked for there. However, it is
@@ -462,30 +473,30 @@
         // fixed offsets from the stack pointer.  Hence, we store the parameters there as
         // well.
         
-        m_instManip.generateSave();
-        m_instManip.generateAddressCopy(i->front().second, InstManip::REG_0, true);      // REG_0 live to call
-        m_instManip.generateParamStore(InstManip::REG_0, InstManip::PARAM_0);
-        m_instManip.generateSpillShared(spillAddr, InstManip::REG_1, InstManip::REG_2);
-        m_instManip.generateLoad((uint64_t) p4info, InstManip::REG_1, InstManip::REG_2); // REG_1 live to call
-        m_instManip.generateParamStore(InstManip::REG_1, InstManip::PARAM_1);
-        m_instManip.generateCall((uint64_t) &phase4, slotBase);
-        m_instManip.generateRestoreShared(spillAddr);
-        m_instManip.generateBranchAlways(i->front().first, slotBase, m_instManip.getRestoreInst());
+        m_pInstManip->generateSave();
+        m_pInstManip->generateAddressCopy(i->front().second, InstManip::REG_0, true);      // REG_0 live to call
+        m_pInstManip->generateParamStore(InstManip::REG_0, SparcInstManip::PARAM_0);
+        m_pInstManip->generateSpillShared(spillAddr, InstManip::REG_1, InstManip::REG_2);
+        m_pInstManip->generateLoad((uint64_t) p4info, InstManip::REG_1, InstManip::REG_2); // REG_1 live to call
+        m_pInstManip->generateParamStore(InstManip::REG_1, SparcInstManip::PARAM_1);
+        m_pInstManip->generateCall((uint64_t) &phase4, slotBase);
+        m_pInstManip->generateRestoreShared(spillAddr);
+        m_pInstManip->generateBranchAlways(i->front().first, slotBase, m_pInstManip->getRestoreInst());
 
-        m_instManip.endCode();
+        m_pInstManip->endCode();
 
         // Dump snippet instructions:
 
         cerr << "phase4 slot instructions:" << endl;
         
         for(vector<unsigned>::iterator j = snippet.begin(), k = snippet.end(); j != k; ++j) {
-            m_instManip.printInst(*j);
+            m_pInstManip->printInst(*j);
             cerr << endl;
         }
 
         // Copy the snippet code into the slot
         assert(snippet.size() == getSlotSize(*i) && "Snippet size does not match slot size");
-        copySnippetToSlot(snippet, slotBase, m_pTraceCache->getVM(), m_instManip);
+        copySnippetToSlot(snippet, slotBase, m_pTraceCache->getVM(), *m_pInstManip);
 
         // just one candidate for now
         break;
@@ -497,22 +508,22 @@
     // The following sum corresponds to the sizes consumed by the various regions of the
     // phase 3 slot.  See picture of phase 3 contents for details.
 
-    return m_instManip.getGenSaveSize() +
-        m_instManip.getGenAddressCopySize(cand.front().second) +
-        m_instManip.getGenParamStoreSize() +
-        m_instManip.getGenSpillSharedSize() +
-        m_instManip.getGenLoadSize() +
-        m_instManip.getGenParamStoreSize() +
-        m_instManip.getGenCallSize() +
-        m_instManip.getGenRestoreSharedSize() +
-        m_instManip.getGenBranchAlwaysSize();
+    return m_pInstManip->getGenSaveSize() +
+        m_pInstManip->getGenAddressCopySize(cand.front().second) +
+        m_pInstManip->getGenParamStoreSize() +
+        m_pInstManip->getGenSpillSharedSize() +
+        m_pInstManip->getGenLoadSize() +
+        m_pInstManip->getGenParamStoreSize() +
+        m_pInstManip->getGenCallSize() +
+        m_pInstManip->getGenRestoreSharedSize() +
+        m_pInstManip->getGenBranchAlwaysSize();
 }
 
 void Phase3::transform()
 {
     // 2. Analyze the function and determine the load-volatile candidates...
     vector<InstCandidate> candidates;
-    m_instManip.findCandidates(m_pPhase3Info->getStartAddr(),
+    m_pInstManip->findCandidates(m_pPhase3Info->getStartAddr(),
-                               m_pPhase3Info->getEndAddr(),
-                               candidates);
+                                 m_pPhase3Info->getEndAddr(),
+                                 candidates);
 
@@ -533,7 +544,7 @@
 Phase4::Phase4(uint64_t tag, Phase4Info* p4info):
     m_pPhase4Info(p4info),
     m_pTraceCache(p4info->getTraceCache()),
-    m_instManip(p4info->getTraceCache()->getVM()),
+    m_pInstManip(p4info->getIM()),
     m_tag(tag)
 {
     cerr << "phase4 ctor: tag is " << tag << endl;
@@ -609,7 +620,7 @@
         VirtualMem* vm = m_pTraceCache->getVM();
         for(vector<std::pair<uint64_t, unsigned> >::const_iterator i = cand.getInsts().begin() + 1,
                 e = cand.getInsts().end(); i != e; ++i)
-            vm->writeInstToVM(i->first, m_instManip.getNOP());
+            vm->writeInstToVM(i->first, m_pInstManip->getNOP());
 
         // Write the instructions to call the instrumentation function
 




