[llvm-commits] [llvm] r152486 - /llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp

Bill Wendling isanbard at gmail.com
Fri Mar 9 23:11:56 PST 2012


Author: void
Date: Sat Mar 10 01:11:55 2012
New Revision: 152486

URL: http://llvm.org/viewvc/llvm-project?rev=152486&view=rev
Log:
Implement a more intelligent way of spilling uses across an invoke boundary.

The old way of determining when and where to spill a value used inside a
landing pad resulted in spilling that value everywhere, not just at the
invoke edge.

The new algorithm determines which values are used within a landing pad. It
then spills those values before the invoke and reloads them before their uses.
This should prevent excessive spilling in many cases, e.g. inside loops.
<rdar://problem/10609139>
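
For readers unfamiliar with the pass, here is a minimal sketch of the
spill/reload idea using the C++ API of this era. The helper name
spillAcrossInvoke and its parameters are hypothetical and not part of this
commit; the actual pass (diff below) batches the work per value and per
crossed invoke, and handles PHI uses specially.

    // Hypothetical sketch (not from this commit): spill a value that is live
    // across a single invoke and reload it before one use on the unwind path.
    #include "llvm/Function.h"
    #include "llvm/Instructions.h"

    using namespace llvm;

    static void spillAcrossInvoke(Function &F, Instruction *Inst,
                                  InvokeInst *Invoke, Instruction *UseInLPad) {
      // One stack slot for the value, created in the entry block.
      AllocaInst *Slot = new AllocaInst(Inst->getType(), 0,
                                        Inst->getName() + ".reg2mem",
                                        F.getEntryBlock().begin());

      // Spill immediately before the invoke (the invoke is the block's
      // terminator), so the value is in memory when the unwind edge is taken.
      new StoreInst(Inst, Slot, Invoke);

      // Reload immediately before the use reached via the unwind edge, and
      // rewrite only that use to consume the reloaded value.
      LoadInst *Reload = new LoadInst(Slot, Inst->getName() + ".reload",
                                      /*isVolatile=*/true, UseInLPad);
      UseInLPad->replaceUsesOfWith(Inst, Reload);
    }

Unlike the old DemoteRegToStack approach, only the uses reached through an
unwind edge are rewritten to load from the stack slot; uses on the normal
path keep using the register value.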

Modified:
    llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp

Modified: llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp?rev=152486&r1=152485&r2=152486&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp (original)
+++ llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp Sat Mar 10 01:11:55 2012
@@ -21,6 +21,7 @@
 #include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
 #include "llvm/Pass.h"
+#include "llvm/Analysis/Verifier.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLowering.h"
@@ -139,14 +140,19 @@
   Builder.CreateStore(CallSiteNoC, CallSite, true/*volatile*/);
 }
 
-/// MarkBlocksLiveIn - Insert BB and all of its predescessors into LiveBBs until
+/// markBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
 /// we reach blocks we've already seen.
-static void MarkBlocksLiveIn(BasicBlock *BB,
-                             SmallPtrSet<BasicBlock*, 64> &LiveBBs) {
+static void markBlocksLiveIn(BasicBlock *BB, Instruction *Inst,
+                             SmallPtrSet<BasicBlock*, 64> &LiveBBs,
+                             SmallPtrSet<BasicBlock*, 4> &InvokesCrossed) {
   if (!LiveBBs.insert(BB)) return; // already been here.
 
-  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
-    MarkBlocksLiveIn(*PI, LiveBBs);
+  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+    BasicBlock *Pred = *PI;
+    if (BB->isLandingPad() && BB != Inst->getParent())
+      InvokesCrossed.insert(Pred);
+    markBlocksLiveIn(Pred, Inst, LiveBBs, InvokesCrossed);
+  }
 }
 
 /// substituteLPadValues - Substitute the values returned by the landingpad
@@ -297,6 +303,9 @@
 /// edge and spill them.
 void SjLjEHPass::lowerAcrossUnwindEdges(Function &F,
                                         ArrayRef<InvokeInst*> Invokes) {
+  SmallVector<std::pair<Instruction*, Instruction*>, 32> ReloadUsers;
+  DenseMap<std::pair<Instruction*, Instruction*>, AllocaInst*> AllocaMap;
+
   // Finally, scan the code looking for instructions with bad live ranges.
   for (Function::iterator
          BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
@@ -327,44 +336,81 @@
       }
 
       // Find all of the blocks that this value is live in.
-      SmallPtrSet<BasicBlock*, 64> LiveBBs;
-      LiveBBs.insert(Inst->getParent());
+      std::map<Instruction*, SmallPtrSet<BasicBlock*, 4> > InvokesCrossed;
+      std::map<Instruction*, SmallPtrSet<BasicBlock*, 64> > LiveBBs;
       while (!Users.empty()) {
-        Instruction *U = Users.back();
-        Users.pop_back();
+        Instruction *U = Users.pop_back_val();
+        LiveBBs[U].insert(Inst->getParent());
 
-        if (!isa<PHINode>(U)) {
-          MarkBlocksLiveIn(U->getParent(), LiveBBs);
-        } else {
+        if (PHINode *PN = dyn_cast<PHINode>(U)) {
           // Uses for a PHI node occur in their predecessor block.
-          PHINode *PN = cast<PHINode>(U);
           for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
             if (PN->getIncomingValue(i) == Inst)
-              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
+              markBlocksLiveIn(PN->getIncomingBlock(i), Inst, LiveBBs[U],
+                               InvokesCrossed[U]);
+        } else {
+          markBlocksLiveIn(U->getParent(), Inst, LiveBBs[U], InvokesCrossed[U]);
         }
       }
 
-      // Now that we know all of the blocks that this thing is live in, see if
-      // it includes any of the unwind locations.
-      bool NeedsSpill = false;
-      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
-        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
-        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
-          DEBUG(dbgs() << "SJLJ Spill: " << *Inst << " around "
-                << UnwindBlock->getName() << "\n");
-          NeedsSpill = true;
-          break;
+      // Go through the invokes the value crosses and insert a spill right
+      // before the invoke.
+      for (std::map<Instruction*, SmallPtrSet<BasicBlock*, 4> >::iterator
+             MI = InvokesCrossed.begin(), ME = InvokesCrossed.end();
+           MI != ME; ++MI) {
+        Instruction *User = MI->first;
+        SmallPtrSet<BasicBlock*, 4> &Crossings = MI->second;
+        if (Crossings.empty()) continue;
+
+        ReloadUsers.push_back(std::make_pair(Inst, User));
+
+        AllocaInst *&Slot = AllocaMap[std::make_pair(Inst, User)];
+        if (!Slot)
+          Slot = new AllocaInst(Inst->getType(), 0,
+                                Inst->getName() + ".reg2mem",
+                                F.getEntryBlock().begin());
+
+        for (SmallPtrSet<BasicBlock*, 4>::iterator
+               CI = Crossings.begin(), CE = Crossings.end(); CI != CE; ++CI) {
+          new StoreInst(Inst, Slot, (*CI)->getTerminator());
+          ++NumSpilled;
         }
       }
+    }
+  }
 
-      // If we decided we need a spill, do it.
-      // FIXME: Spilling this way is overkill, as it forces all uses of
-      // the value to be reloaded from the stack slot, even those that aren't
-      // in the unwind blocks. We should be more selective.
-      if (NeedsSpill) {
-        DemoteRegToStack(*Inst, true);
-        ++NumSpilled;
-      }
+  // Now go through the instructions which were spilled and replace their uses
+  // after a crossed invoke with a reload instruction.
+  for (SmallVectorImpl<std::pair<Instruction*, Instruction*> >::iterator
+         I = ReloadUsers.begin(), E = ReloadUsers.end(); I != E; ++I) {
+    Instruction *User = I->second;
+    AllocaInst *Slot = AllocaMap[*I];
+    assert(Slot && "A spill slot hasn't been allocated yet!");
+
+    if (PHINode *PN = dyn_cast<PHINode>(User)) {
+      // If this is a PHI node, we can't insert a load of the value before the
+      // use. Instead insert the load in the predecessor block corresponding to
+      // the incoming value.
+      //
+      // Note that if there are multiple edges from a basic block to this PHI
+      // node, we cannot insert multiple loads: the resulting PHI node would
+      // have multiple values (one from each load) coming in from the same
+      // block, which is illegal SSA form. For this reason, we keep track of
+      // and reuse the loads we insert.
+      DenseMap<BasicBlock*, Value*> Loads;
+      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+        if (PN->getIncomingValue(i) == I->first) {
+          Value *&V = Loads[PN->getIncomingBlock(i)];
+          if (V == 0)
+            // Insert the load into the predecessor block
+            V = new LoadInst(Slot, I->first->getName() + ".reload", true,
+                             PN->getIncomingBlock(i)->getTerminator());
+
+          PN->setIncomingValue(i, V);
+        }
+    } else {
+      LoadInst *Reload = new LoadInst(Slot, Slot->getName() + ".reload", User);
+      User->replaceUsesOfWith(I->first, Reload);
     }
   }
 
@@ -521,5 +567,9 @@
 
 bool SjLjEHPass::runOnFunction(Function &F) {
   bool Res = setupEntryBlockAndCallSites(F);
+  DEBUG({
+      if (verifyFunction(F))
+        report_fatal_error("verifyFunction failed!");
+    });
   return Res;
 }

