[llvm-branch-commits] [llvm-branch] r101926 - in /llvm/branches/Apple/Morbo: include/llvm/Analysis/InlineCost.h lib/Analysis/InlineCost.cpp

Dale Johannesen dalej at apple.com
Tue Apr 20 11:15:05 PDT 2010


Author: johannes
Date: Tue Apr 20 13:15:05 2010
New Revision: 101926

URL: http://llvm.org/viewvc/llvm-project?rev=101926&view=rev
Log:
Merge fixes for the other nondeterministic bug.
--- Merging r101584 into '.':
U    include/llvm/Analysis/InlineCost.h
--- Merging r101657 into '.':
U    lib/Analysis/InlineCost.cpp
--- Merging r101658 into '.':
G    lib/Analysis/InlineCost.cpp


Modified:
    llvm/branches/Apple/Morbo/include/llvm/Analysis/InlineCost.h
    llvm/branches/Apple/Morbo/lib/Analysis/InlineCost.cpp

Modified: llvm/branches/Apple/Morbo/include/llvm/Analysis/InlineCost.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Morbo/include/llvm/Analysis/InlineCost.h?rev=101926&r1=101925&r2=101926&view=diff
==============================================================================
--- llvm/branches/Apple/Morbo/include/llvm/Analysis/InlineCost.h (original)
+++ llvm/branches/Apple/Morbo/include/llvm/Analysis/InlineCost.h Tue Apr 20 13:15:05 2010
@@ -16,9 +16,9 @@
 
 #include <cassert>
 #include <climits>
-#include <map>
 #include <vector>
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
 
 namespace llvm {
 
@@ -165,7 +165,7 @@
       void analyzeFunction(Function *F);
     };
 
-    std::map<const Function *, FunctionInfo> CachedFunctionInfo;
+    ValueMap<const Function *, FunctionInfo> CachedFunctionInfo;
 
   public:
 

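A note on the first merged revision: llvm::ValueMap, unlike std::map, registers callbacks on its Value keys, so a cache entry keyed on a Function is erased when that function is deleted rather than lingering under a dangling pointer that a later allocation may reuse. A stale entry of that kind is the sort of address-reuse nondeterminism the log message appears to target. The following plain-C++ sketch of the hazard uses hypothetical stand-in names, not the LLVM API:

#include <cstdio>
#include <map>

struct Function { int NumBlocks; };

int main() {
  std::map<const Function *, int> Cache;        // like CachedFunctionInfo

  Function *F = new Function();
  F->NumBlocks = 4;
  Cache[F] = F->NumBlocks;                      // cache info keyed on F's address
  delete F;                                     // F dies, but the entry survives

  Function *G = new Function();                 // may reuse F's old address
  G->NumBlocks = 7;
  if (Cache.count(G))                           // hit or miss depends on the
    std::printf("stale hit: %d\n", Cache[G]);   // allocator: nondeterministic
  else
    std::printf("fresh miss\n");
  delete G;
  return 0;
}

With a ValueMap, deleting F erases its entry, so the later lookup for G can never see F's stale data.
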
Modified: llvm/branches/Apple/Morbo/lib/Analysis/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Morbo/lib/Analysis/InlineCost.cpp?rev=101926&r1=101925&r2=101926&view=diff
==============================================================================
--- llvm/branches/Apple/Morbo/lib/Analysis/InlineCost.cpp (original)
+++ llvm/branches/Apple/Morbo/lib/Analysis/InlineCost.cpp Tue Apr 20 13:15:05 2010
@@ -249,7 +249,7 @@
 // function call or not.
 //
 InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
-                               SmallPtrSet<const Function *, 16> &NeverInline) {
+                               SmallPtrSet<const Function*, 16> &NeverInline) {
   Instruction *TheCall = CS.getInstruction();
   Function *Callee = CS.getCalledFunction();
   Function *Caller = TheCall->getParent()->getParent();
@@ -286,31 +286,36 @@
   } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
     InlineCost += InlineConstants::NoreturnPenalty;
   
-  // Get information about the callee...
-  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
+  // Get information about the callee.
+  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
   
   // If we haven't calculated this information yet, do so now.
-  if (CalleeFI.Metrics.NumBlocks == 0)
-    CalleeFI.analyzeFunction(Callee);
+  if (CalleeFI->Metrics.NumBlocks == 0)
+    CalleeFI->analyzeFunction(Callee);
 
   // If we should never inline this, return a huge cost.
-  if (CalleeFI.Metrics.NeverInline)
+  if (CalleeFI->Metrics.NeverInline)
     return InlineCost::getNever();
 
-  // FIXME: It would be nice to kill off CalleeFI.NeverInline. Then we
+  // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
   // could move this up and avoid computing the FunctionInfo for
   // things we are going to just return always inline for. This
   // requires handling setjmp somewhere else, however.
   if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
     return InlineCost::getAlways();
     
-  if (CalleeFI.Metrics.usesDynamicAlloca) {
-    // Get infomation about the caller...
+  if (CalleeFI->Metrics.usesDynamicAlloca) {
+    // Get information about the caller.
     FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
 
     // If we haven't calculated this information yet, do so now.
-    if (CallerFI.Metrics.NumBlocks == 0)
+    if (CallerFI.Metrics.NumBlocks == 0) {
       CallerFI.analyzeFunction(Caller);
+     
+      // Recompute the CalleeFI pointer; fetching the Caller's info could have
+      // invalidated it.
+      CalleeFI = &CachedFunctionInfo[Callee];
+    }
 
     // Don't inline a callee with dynamic alloca into a caller without them.
     // Functions containing dynamic alloca's are inefficient in various ways;
@@ -337,15 +342,15 @@
     // scalarization), so encourage the inlining of the function.
     //
     if (isa<AllocaInst>(I)) {
-      if (ArgNo < CalleeFI.ArgumentWeights.size())
-        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;
+      if (ArgNo < CalleeFI->ArgumentWeights.size())
+        InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;
 
       // If this is a constant being passed into the function, use the argument
       // weights calculated for the callee to determine how much will be folded
       // away with this information.
     } else if (isa<Constant>(I)) {
-      if (ArgNo < CalleeFI.ArgumentWeights.size())
-        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
+      if (ArgNo < CalleeFI->ArgumentWeights.size())
+        InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
     }
   }
   
@@ -353,10 +358,10 @@
   // likely to be inlined, look at factors that make us not want to inline it.
 
   // Calls usually take a long time, so they make the inlining gain smaller.
-  InlineCost += CalleeFI.Metrics.NumCalls * InlineConstants::CallPenalty;
+  InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
 
   // Look at the size of the callee. Each instruction counts as 5.
-  InlineCost += CalleeFI.Metrics.NumInsts*InlineConstants::InstrCost;
+  InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
 
   return llvm::InlineCost::get(InlineCost);
 }
@@ -366,7 +371,7 @@
 float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
   Function *Callee = CS.getCalledFunction();
   
-  // Get information about the callee...
+  // Get information about the callee.
   FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
   
   // If we haven't calculated this information yet, do so now.
@@ -390,41 +395,49 @@
 /// growCachedCostInfo - update the cached cost info for Caller after Callee has
 /// been inlined.
 void
-InlineCostAnalyzer::growCachedCostInfo(Function* Caller, Function* Callee) {
-  FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
+InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
+  CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;
 
   // For small functions we prefer to recalculate the cost for better accuracy.
-  if (CallerFI.Metrics.NumBlocks < 10 || CallerFI.Metrics.NumInsts < 1000) {
+  if (CallerMetrics.NumBlocks < 10 || CallerMetrics.NumInsts < 1000) {
     resetCachedCostInfo(Caller);
     return;
   }
 
   // For large functions, we can save a lot of computation time by skipping
   // recalculations.
-  if (CallerFI.Metrics.NumCalls > 0)
-    --CallerFI.Metrics.NumCalls;
+  if (CallerMetrics.NumCalls > 0)
+    --CallerMetrics.NumCalls;
 
-  if (Callee) {
-    FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
-    if (!CalleeFI.Metrics.NumBlocks) {
-      resetCachedCostInfo(Caller);
-      return;
-    }
-    CallerFI.Metrics.NeverInline |= CalleeFI.Metrics.NeverInline;
-    CallerFI.Metrics.usesDynamicAlloca |= CalleeFI.Metrics.usesDynamicAlloca;
+  if (Callee == 0) return;
+  
+  CodeMetrics &CalleeMetrics = CachedFunctionInfo[Callee].Metrics;
 
-    CallerFI.Metrics.NumInsts += CalleeFI.Metrics.NumInsts;
-    CallerFI.Metrics.NumBlocks += CalleeFI.Metrics.NumBlocks;
-    CallerFI.Metrics.NumCalls += CalleeFI.Metrics.NumCalls;
-    CallerFI.Metrics.NumVectorInsts += CalleeFI.Metrics.NumVectorInsts;
-    CallerFI.Metrics.NumRets += CalleeFI.Metrics.NumRets;
-
-    // analyzeBasicBlock counts each function argument as an inst.
-    if (CallerFI.Metrics.NumInsts >= Callee->arg_size())
-      CallerFI.Metrics.NumInsts -= Callee->arg_size();
-    else
-      CallerFI.Metrics.NumInsts = 0;
+  // If we don't have metrics for the callee, don't recalculate them just to
+  // update an approximation in the caller.  Instead, just recalculate the
+  // caller info from scratch.
+  if (CalleeMetrics.NumBlocks == 0) {
+    resetCachedCostInfo(Caller);
+    return;
   }
+  
+  // Since CalleeMetrics were already calculated, we know that the CallerMetrics
+  // reference isn't invalidated: both were in the DenseMap.  
+  CallerMetrics.NeverInline |= CalleeMetrics.NeverInline;
+  CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;
+
+  CallerMetrics.NumInsts += CalleeMetrics.NumInsts;
+  CallerMetrics.NumBlocks += CalleeMetrics.NumBlocks;
+  CallerMetrics.NumCalls += CalleeMetrics.NumCalls;
+  CallerMetrics.NumVectorInsts += CalleeMetrics.NumVectorInsts;
+  CallerMetrics.NumRets += CalleeMetrics.NumRets;
+
+  // analyzeBasicBlock counts each function argument as an inst.
+  if (CallerMetrics.NumInsts >= Callee->arg_size())
+    CallerMetrics.NumInsts -= Callee->arg_size();
+  else
+    CallerMetrics.NumInsts = 0;
+  
   // We are not updating the argumentweights. We have already determined that
   // Caller is a fairly large function, so we accept the loss of precision.
 }
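
The core of the other two merged revisions is the switch from holding a FunctionInfo reference to holding a pointer that is re-fetched after CachedFunctionInfo[Caller] is touched. Like DenseMap, ValueMap can move its elements when an insertion forces the table to grow, so a reference taken before that insertion may dangle; whether it actually does depends on the table's current size, which is how the inliner's answers could vary from run to run. A minimal plain-C++ sketch of the pattern, with hypothetical names and a std::vector standing in for the growing map (this is not the LLVM API):

#include <cstdio>
#include <vector>

struct FunctionInfoStub { int NumBlocks; };

int main() {
  // Stand-in for CachedFunctionInfo: growth may relocate the elements.
  std::vector<FunctionInfoStub> Cache(1);

  FunctionInfoStub *CalleeFI = &Cache[0];   // like &CachedFunctionInfo[Callee]

  Cache.push_back(FunctionInfoStub());      // like CachedFunctionInfo[Caller]:
                                            // may reallocate and move elements,
                                            // leaving CalleeFI dangling

  CalleeFI = &Cache[0];                     // the fix: re-fetch the pointer after
                                            // anything that may grow the container
  std::printf("NumBlocks = %d\n", CalleeFI->NumBlocks);
  return 0;
}

The growCachedCostInfo hunk leans on the same reasoning in the other direction: by the time CalleeMetrics is fetched, both the caller's and the callee's entries already exist, so no growth can occur and the CallerMetrics reference stays valid.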
