[llvm-commits] CVS: llvm/lib/Reoptimizer/LightWtProfiling/Trigger/FirstTrigger.cpp

Anand Shukla ashukla at cs.uiuc.edu
Fri Jul 18 11:02:01 PDT 2003


Changes in directory llvm/lib/Reoptimizer/LightWtProfiling/Trigger:

FirstTrigger.cpp updated: 1.3 -> 1.4

---
Log message:

Major revision for runtime tracing framework

---
Diffs of the changes:

Index: llvm/lib/Reoptimizer/LightWtProfiling/Trigger/FirstTrigger.cpp
diff -u llvm/lib/Reoptimizer/LightWtProfiling/Trigger/FirstTrigger.cpp:1.3 llvm/lib/Reoptimizer/LightWtProfiling/Trigger/FirstTrigger.cpp:1.4
--- llvm/lib/Reoptimizer/LightWtProfiling/Trigger/FirstTrigger.cpp:1.3	Sun Nov  3 13:27:17 2002
+++ llvm/lib/Reoptimizer/LightWtProfiling/Trigger/FirstTrigger.cpp	Fri Jul 18 11:01:38 2003
@@ -9,84 +9,288 @@
 
 #include "llvm/Reoptimizer/VirtualMem.h"
 #include "llvm/Reoptimizer/InstrUtils.h"
+#include "llvm/Reoptimizer/TraceCache.h"
+#include "llvm/Reoptimizer/GetTraceTime.h"
+#include <sys/time.h>
 #include <stdlib.h>
 #include <sys/time.h>
 #include <iostream>
 #include <fstream>
+#include "llvm/Reoptimizer/CFG.h"
+#include <algorithm>
+#include "GetTimer.h"
 
-int reopt_threshold;
+using namespace std;
 
-#ifdef GET_TRACE_TIME
+extern long THRESHOLD_LEVEL_2;
+extern long LEVEL_TWO_EXITS; 
 
-static struct timeval llvm_trace_time;
-static struct timeval llvm_trigger_time;
 
-extern "C" void llvm_time_start(){
-  gettimeofday(&llvm_trace_time, 0);
-}
+#ifdef __sparc
+#include <libcpc.h>
+#endif
 
-extern "C" void llvm_time_end(){
-  static std::ofstream f_out("llvm_last_run");
+//global counters for diagnostics
+extern "C" int initialize_timer();
+extern uint64_t llvm_interval_counter;
+extern std::map<uint64_t, int> exitCounter;
+extern std::map<uint64_t, int> iterationCounter;
+extern std::map<uint64_t, uint64_t> firstTriggerAddr; //tracestart addr
+extern std::map<uint64_t, std::pair<long, long> > backOffCounters; 
 
-  struct timeval time_end;
-  gettimeofday(&time_end, 0);
-  
-  long time = (time_end.tv_sec - llvm_trace_time.tv_sec)*1000000 + 
-    (time_end.tv_usec - llvm_trace_time.tv_usec);
-  
-  f_out<<time<<"\n";
-}
+#ifdef GET_ALL_INFO
+int global_threshold2;
+#endif
 
-void llvm_trigger_time_start(){
-  gettimeofday(&llvm_trigger_time, 0);
-}
+//#define THRESHOLD_LEVEL_2 50
 
-void llvm_trigger_time_end(){
-  static std::ofstream f_out("llvm_trigger_last_run");
-    
-  struct timeval time_end;
-  gettimeofday(&time_end, 0);
-  
-  long time = (time_end.tv_sec - llvm_trigger_time.tv_sec)*1000000 + 
-    (time_end.tv_usec - llvm_trigger_time.tv_usec);
-  
-  f_out<<time<<"\n";
-}
+//good, bad backoff
+void insert_address_at(uint64_t n, uint64_t m);
 
-#endif
+void doInstrumentation(uint64_t addr1, uint64_t addr2, VirtualMem *vm);
 
-void getFirstTrace(uint64_t addr1, uint64_t addr2, int depth, VirtualMem *vm){
+//function used as dummy function
+int dummyFunction2(int);
+
+int reopt_threshold=30;
+int exit_count=0;
+
+TraceCache *tr;
+TraceCache *tr2;
+VirtualMem *vm;
+
+void getFirstTrace(uint64_t addr1, uint64_t addr2, int depth, VirtualMem *vm, 
+		   TraceCache *tr){
   //addr1: the "target" of backward branch
   //addr2: the PC of branch instruction
   //to read any instruction at address addr1 do the following
   //     unsigned int instr =  vm->readInstrFrmVm(addr);
+  doInstrumentation(addr1, addr2, vm);
 }
 
-
 extern "C" void reoptimizerInitialize(int *t){
-  char *a = getenv("LLVM_THRESHOLD");
-  if(!a)
-    *t = 30; //default threshold!
+  vm = new VirtualMem();
+  tr = new TraceCache(30000, vm);
+  tr2 = new TraceCache(&dummyFunction2, 30000, vm);
+ 
+  if(getenv("THRESHOLD_LEVEL_2")!=NULL)
+    THRESHOLD_LEVEL_2 = atoi(getenv("THRESHOLD_LEVEL_2"));
   else
-    *t = atoi(a);
+    THRESHOLD_LEVEL_2= 50;
+
+  LEVEL_TWO_EXITS = THRESHOLD_LEVEL_2/3;
+
+  if(getenv("THRESHOLD_LEVEL_1")!=NULL)
+    reopt_threshold = atoi(getenv("THRESHOLD_LEVEL_1"));
+  else
+    reopt_threshold = 30;
+
+  *t=30;
+
+  //initialize_timer();
 }
 
-extern "C" void llvm_first_trigger(int *cnt){
-  uint64_t brAddr;
+extern "C" void llvm_first_trigger(){
+  
+  //save regs
+  uint64_t i_reg_save[6];
+  uint32_t f_reg_save[32];
+  uint64_t fd_reg_save[16];
+  uint64_t ccr_reg;
+  uint64_t fprs_reg;
+  uint64_t fsr_reg;
+  uint64_t g1_reg;
+ 
+  //#ifdef __sparc
+  asm volatile ("stx %%i0, %0": "=m"(i_reg_save[0]));
+  asm volatile ("stx %%i1, %0": "=m"(i_reg_save[1]));
+  asm volatile ("stx %%i2, %0": "=m"(i_reg_save[2]));
+  asm volatile ("stx %%i3, %0": "=m"(i_reg_save[3]));
+  asm volatile ("stx %%i4, %0": "=m"(i_reg_save[4]));
+  asm volatile ("stx %%i5, %0": "=m"(i_reg_save[5]));
+
+  asm volatile ("stx %%g1, %0": "=m"(g1_reg));
+  asm volatile ("st %%f0, %0": "=m"(f_reg_save[0]));
+  asm volatile ("st %%f1, %0": "=m"(f_reg_save[1]));
+  asm volatile ("st %%f2, %0": "=m"(f_reg_save[2]));
+  asm volatile ("st %%f3, %0": "=m"(f_reg_save[3]));
+  asm volatile ("st %%f4, %0": "=m"(f_reg_save[4]));
+  asm volatile ("st %%f5, %0": "=m"(f_reg_save[5]));
+  asm volatile ("st %%f6, %0": "=m"(f_reg_save[6]));
+  asm volatile ("st %%f7, %0": "=m"(f_reg_save[7]));
+  asm volatile ("st %%f8, %0": "=m"(f_reg_save[8]));
+  asm volatile ("st %%f9, %0": "=m"(f_reg_save[9]));
+  asm volatile ("st %%f10, %0": "=m"(f_reg_save[10]));
+  asm volatile ("st %%f11, %0": "=m"(f_reg_save[11]));
+  asm volatile ("st %%f12, %0": "=m"(f_reg_save[12]));
+  asm volatile ("st %%f13, %0": "=m"(f_reg_save[13]));
+  asm volatile ("st %%f14, %0": "=m"(f_reg_save[14]));
+  asm volatile ("st %%f15, %0": "=m"(f_reg_save[15]));
+
+  /*
+  asm volatile ("st %%f16, %0": "=m"(f_reg_save[16]));
+  asm volatile ("st %%f17, %0": "=m"(f_reg_save[17]));
+  asm volatile ("st %%f18, %0": "=m"(f_reg_save[18]));
+  asm volatile ("st %%f19, %0": "=m"(f_reg_save[19]));
+  asm volatile ("st %%f20, %0": "=m"(f_reg_save[20]));
+  asm volatile ("st %%f21, %0": "=m"(f_reg_save[21]));
+  asm volatile ("st %%f22, %0": "=m"(f_reg_save[22]));
+  asm volatile ("st %%f23, %0": "=m"(f_reg_save[23]));
+  asm volatile ("st %%f24, %0": "=m"(f_reg_save[24]));
+  asm volatile ("st %%f25, %0": "=m"(f_reg_save[25]));
+  asm volatile ("st %%f26, %0": "=m"(f_reg_save[26]));
+  asm volatile ("st %%f27, %0": "=m"(f_reg_save[27]));
+  asm volatile ("st %%f28, %0": "=m"(f_reg_save[28]));
+  asm volatile ("st %%f29, %0": "=m"(f_reg_save[29]));
+  asm volatile ("st %%f30, %0": "=m"(f_reg_save[30]));
+  asm volatile ("st %%f31, %0": "=m"(f_reg_save[31]));*/
+  
+  asm volatile ("std %%f32, %0": "=m"(fd_reg_save[32/2-16]));
+  asm volatile ("std %%f34, %0": "=m"(fd_reg_save[34/2-16]));
+  asm volatile ("std %%f36, %0": "=m"(fd_reg_save[36/2-16]));
+  asm volatile ("std %%f38, %0": "=m"(fd_reg_save[38/2-16]));
+  asm volatile ("std %%f40, %0": "=m"(fd_reg_save[40/2-16]));
+  asm volatile ("std %%f42, %0": "=m"(fd_reg_save[42/2-16]));
+  asm volatile ("std %%f44, %0": "=m"(fd_reg_save[44/2-16]));
+  asm volatile ("std %%f46, %0": "=m"(fd_reg_save[46/2-16]));
+  //*/
+  asm volatile ("std %%f48, %0": "=m"(fd_reg_save[48/2-16]));
+  asm volatile ("std %%f50, %0": "=m"(fd_reg_save[50/2-16]));
+  asm volatile ("std %%f52, %0": "=m"(fd_reg_save[52/2-16]));
+  asm volatile ("std %%f54, %0": "=m"(fd_reg_save[54/2-16]));
+  asm volatile ("std %%f56, %0": "=m"(fd_reg_save[56/2-16]));
+  asm volatile ("std %%f58, %0": "=m"(fd_reg_save[58/2-16]));
+  asm volatile ("std %%f60, %0": "=m"(fd_reg_save[60/2-16]));
+  asm volatile ("std %%f62, %0": "=m"(fd_reg_save[62/2-16]));
+
+  asm volatile ("stx %%fsr, %0": "=m" (fsr_reg));
+  asm volatile ("rd %%fprs, %0": "=r"(fprs_reg));
+
+  asm volatile ("rd %%ccr, %0": "=r"(ccr_reg));
+ 
+  //#endif
+  //a map of counters
 
-  static VirtualMem *vm = new VirtualMem();
+  static map<uint64_t, int> counterMap;
+  static map<uint64_t, int> seenOccur;
 
-  std::cerr<<"Count = "<<*cnt<<"\n";
+  uint64_t brAddr;
 
-#if 0   // FIXME: This breaks x86 build
+#ifdef __sparc
   asm("add %%i7, %1, %0":"=r"(brAddr):"i" (0));
+#else
+  assert(false && "Case not handled for this processor architecture!");
 #endif
 
-  unsigned int brInst = vm->readInstrFrmVm(brAddr+8);
-  assert(isBranchInstr(brInst) && "Not a branch!");
+  //if(counterMap[brAddr] > 200)
+  //return;
+  if(++counterMap[brAddr] > reopt_threshold){
 
-  uint64_t brTarget = getBranchTarget(brInst, brAddr+8);
-  //std::cerr<<(void *)brTarget<<"\n";
+    counterMap.erase(brAddr);
+
+    //std::cerr<<"Originally Removed from addr: "<<(void *)brAddr<<"\n";
+
+    char offst = 8; 
+    unsigned int brInst = vm->readInstrFrmVm(brAddr + offst, tr, tr2);
+    
+    while(!isBranchInstr(brInst)){
+      offst += 4;
+      brInst = vm->readInstrFrmVm(brAddr + offst, tr, tr2);
+    }
+    
+    assert(isBranchInstr(brInst) && "Not a branch!");
+    
+    uint64_t brTarget = getBranchTarget(brInst, brAddr+offst);
+  
+    firstTriggerAddr[brTarget] = brAddr;
+    
+    //check if tracecache already has optimized code. If yes,
+    //do not generate SLI
+    if(!tr->hasTraceAddr(brTarget, brAddr+offst)){
+      getFirstTrace(brTarget, brAddr+offst, 100, vm, tr);
+    }
+    else{
+#ifdef GET_ALL_INFO
+      std::cerr<<"SLI-exists\t"<<(void *)brTarget<<"\t"<<(void *)(brTarget+offst)<<"\n";
+#endif
+      //write a branch going to top of trace in tr
+      uint64_t traceAddrInTC = tr->getStartAddr(brTarget);
+      
+      vm->writeInstToVM(traceAddrInTC, tr->getAddr(brTarget));
+      vm->writeBranchInstruction(brTarget, traceAddrInTC);
+      doFlush(brTarget-16, brTarget+16);
+      doFlush(traceAddrInTC-16, traceAddrInTC+16);
+    }
+        
+    assert(isCallInstr(vm->readInstrFrmVm(brAddr, tr, tr2)) && "not call");
+    vm->writeInstToVM(brAddr, NOP);
+    doFlush(brAddr-8, brAddr+16);
+  }
+ 
+  //#ifdef __sparc
+  fprs_reg ^= 0;
+  ccr_reg ^= 0;
+  asm volatile ("wr %0, 0, %%ccr":: "r"(ccr_reg));
+  asm volatile ("ldx %0, %%i0":: "m"(i_reg_save[0]));
+  asm volatile ("ldx %0, %%i1":: "m"(i_reg_save[1]));
+  asm volatile ("ldx %0, %%i2":: "m"(i_reg_save[2]));
+  asm volatile ("ldx %0, %%i3":: "m"(i_reg_save[3]));
+  asm volatile ("ldx %0, %%i4":: "m"(i_reg_save[4]));
+  asm volatile ("ldx %0, %%i5":: "m"(i_reg_save[5]));
+
+  asm volatile ("wr %0, 0, %%fprs":: "r"(fprs_reg));
+  asm volatile ("ldx %0, %%g1":: "m"(g1_reg));
+  asm volatile ("ld %0, %%f0":: "m"(f_reg_save[0]));
+  asm volatile ("ld %0, %%f1":: "m"(f_reg_save[1]));
+  asm volatile ("ld %0, %%f2":: "m"(f_reg_save[2]));
+  asm volatile ("ld %0, %%f3":: "m"(f_reg_save[3]));
+  asm volatile ("ld %0, %%f4":: "m"(f_reg_save[4]));
+  asm volatile ("ld %0, %%f5":: "m"(f_reg_save[5]));
+  asm volatile ("ld %0, %%f6":: "m"(f_reg_save[6]));
+  asm volatile ("ld %0, %%f7":: "m"(f_reg_save[7]));
+  asm volatile ("ld %0, %%f8":: "m"(f_reg_save[8]));
+  asm volatile ("ld %0, %%f9":: "m"(f_reg_save[9]));
+  asm volatile ("ld %0, %%f10":: "m"(f_reg_save[10]));
+  asm volatile ("ld %0, %%f11":: "m"(f_reg_save[11]));
+  asm volatile ("ld %0, %%f12":: "m"(f_reg_save[12]));
+  asm volatile ("ld %0, %%f13":: "m"(f_reg_save[13]));
+  asm volatile ("ld %0, %%f14":: "m"(f_reg_save[14]));
+  asm volatile ("ld %0, %%f15":: "m"(f_reg_save[15]));
+  /*
+  asm volatile ("ld %0, %%f16":: "m"(f_reg_save[16]));
+  asm volatile ("ld %0, %%f17":: "m"(f_reg_save[17]));
+  asm volatile ("ld %0, %%f18":: "m"(f_reg_save[18]));
+  asm volatile ("ld %0, %%f19":: "m"(f_reg_save[19]));
+  asm volatile ("ld %0, %%f20":: "m"(f_reg_save[20]));
+  asm volatile ("ld %0, %%f21":: "m"(f_reg_save[21]));
+  asm volatile ("ld %0, %%f22":: "m"(f_reg_save[22]));
+  asm volatile ("ld %0, %%f23":: "m"(f_reg_save[23]));
+  asm volatile ("ld %0, %%f24":: "m"(f_reg_save[24]));
+  asm volatile ("ld %0, %%f25":: "m"(f_reg_save[25]));
+  asm volatile ("ld %0, %%f26":: "m"(f_reg_save[26]));
+  asm volatile ("ld %0, %%f27":: "m"(f_reg_save[27]));
+  asm volatile ("ld %0, %%f28":: "m"(f_reg_save[28]));
+  asm volatile ("ld %0, %%f29":: "m"(f_reg_save[29]));
+  asm volatile ("ld %0, %%f30":: "m"(f_reg_save[30]));
+  asm volatile ("ld %0, %%f31":: "m"(f_reg_save[31]));*/
+  asm volatile ("ldd %0, %%f32":: "m"(fd_reg_save[32/2-16]));
+  asm volatile ("ldd %0, %%f34":: "m"(fd_reg_save[34/2-16]));
+  asm volatile ("ldd %0, %%f36":: "m"(fd_reg_save[36/2-16]));
+  asm volatile ("ldd %0, %%f38":: "m"(fd_reg_save[38/2-16]));
+  asm volatile ("ldd %0, %%f40":: "m"(fd_reg_save[40/2-16]));
+  asm volatile ("ldd %0, %%f42":: "m"(fd_reg_save[42/2-16]));
+  asm volatile ("ldd %0, %%f44":: "m"(fd_reg_save[44/2-16]));
+  asm volatile ("ldd %0, %%f46":: "m"(fd_reg_save[46/2-16]));
+  //*/
+  asm volatile ("ldd %0, %%f48":: "m"(fd_reg_save[48/2-16]));
+  asm volatile ("ldd %0, %%f50":: "m"(fd_reg_save[50/2-16]));
+  asm volatile ("ldd %0, %%f52":: "m"(fd_reg_save[52/2-16]));
+  asm volatile ("ldd %0, %%f54":: "m"(fd_reg_save[54/2-16]));
+  asm volatile ("ldd %0, %%f56":: "m"(fd_reg_save[56/2-16]));
+  asm volatile ("ldd %0, %%f58":: "m"(fd_reg_save[58/2-16]));
+  asm volatile ("ldd %0, %%f60":: "m"(fd_reg_save[60/2-16]));
+  asm volatile ("ldd %0, %%f62":: "m"(fd_reg_save[62/2-16]));
+  asm volatile ("ldx %0, %%fsr":: "m"(fsr_reg));
 
-  getFirstTrace(brTarget, brAddr+8, 100, vm);
+  //#endif
 }





More information about the llvm-commits mailing list