[llvm-commits] [llvm] r44601 - in /llvm/trunk: include/llvm/CodeGen/LiveIntervalAnalysis.h lib/CodeGen/LiveIntervalAnalysis.cpp lib/CodeGen/VirtRegMap.cpp

Evan Cheng evan.cheng at apple.com
Tue Dec 4 19:22:34 PST 2007


Author: evancheng
Date: Tue Dec  4 21:22:34 2007
New Revision: 44601

URL: http://llvm.org/viewvc/llvm-project?rev=44601&view=rev
Log:
- Mark the last use of a split interval as a kill instead of letting the spiller
  track it. This allows an important optimization to be re-enabled.
- If all uses / defs of a split interval can be folded, give the interval a
  low spill weight so it would not be picked if spilling is needed (this avoids
  pushing other intervals in the same BB to be spilled).

Modified:
    llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h
    llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
    llvm/trunk/lib/CodeGen/VirtRegMap.cpp

Modified: llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h?rev=44601&r1=44600&r2=44601&view=diff

==============================================================================
--- llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h (original)
+++ llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h Tue Dec  4 21:22:34 2007
@@ -278,6 +278,8 @@
                               SmallVector<unsigned, 2> &Ops,
                               bool isSS, int Slot, unsigned Reg);
 
+    /// canFoldMemoryOperand - Returns true if the specified load / store
+    /// folding is possible.
     bool canFoldMemoryOperand(MachineInstr *MI,
                               SmallVector<unsigned, 2> &Ops) const;
 

Modified: llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=44601&r1=44600&r2=44601&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp Tue Dec  4 21:22:34 2007
@@ -691,6 +691,22 @@
   return false;
 }
 
+/// canFoldMemoryOperand - Returns true if the specified load / store
+/// folding is possible.
+bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
+                                         SmallVector<unsigned, 2> &Ops) const {
+  SmallVector<unsigned, 2> FoldOps;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+    unsigned OpIdx = Ops[i];
+    // FIXME: fold subreg use.
+    if (MI->getOperand(OpIdx).getSubReg())
+      return false;
+    FoldOps.push_back(OpIdx);
+  }
+
+  return mri_->canFoldMemoryOperand(MI, FoldOps);
+}
+
 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
   SmallPtrSet<MachineBasicBlock*, 4> MBBs;
   for (LiveInterval::Ranges::const_iterator
@@ -710,7 +726,7 @@
 
 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
-void LiveIntervals::
+bool LiveIntervals::
 rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
                  unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
                  MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
@@ -723,6 +739,7 @@
                  const LoopInfo *loopInfo,
                  std::map<unsigned,unsigned> &MBBVRegsMap,
                  std::vector<LiveInterval*> &NewLIs) {
+  bool CanFold = false;
  RestartInstruction:
   for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
     MachineOperand& mop = MI->getOperand(i);
@@ -760,11 +777,6 @@
       }
     }
 
-    // Do not fold load / store here if we are splitting. We'll find an
-    // optimal point to insert a load / store later.
-    if (TryFold)
-      TryFold = !TrySplit && NewVReg == 0;
-
     // Scan all of the operands of this instruction rewriting operands
     // to use NewVReg instead of li.reg as appropriate.  We do this for
     // two reasons:
@@ -795,15 +807,23 @@
       }
     }
 
-    if (TryFold &&
-        tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
-                             Ops, FoldSS, FoldSlot, Reg)) {
-      // Folding the load/store can completely change the instruction in
-      // unpredictable ways, rescan it from the beginning.
-      HasUse = false;
-      HasDef = false;
-      goto RestartInstruction;
-    }
+    if (TryFold) {
+      // Do not fold load / store here if we are splitting. We'll find an
+      // optimal point to insert a load / store later.
+      if (!TrySplit) {
+        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                                 Ops, FoldSS, FoldSlot, Reg)) {
+          // Folding the load/store can completely change the instruction in
+          // unpredictable ways, rescan it from the beginning.
+          HasUse = false;
+          HasDef = false;
+          CanFold = false;
+          goto RestartInstruction;
+        }
+      } else {
+        CanFold = canFoldMemoryOperand(MI, Ops);
+      }
+    } else CanFold = false;
 
     // Create a new virtual register for the spill interval.
     bool CreatedNewVReg = false;
@@ -879,8 +899,8 @@
     nI.print(DOUT, mri_);
     DOUT << '\n';
   }
+  return CanFold;
 }
-
 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                    const VNInfo *VNI,
                                    MachineBasicBlock *MBB, unsigned Idx) const {
@@ -920,6 +940,7 @@
                     std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                     std::map<unsigned,unsigned> &MBBVRegsMap,
                     std::vector<LiveInterval*> &NewLIs) {
+  bool AllCanFold = true;
   unsigned NewVReg = 0;
   unsigned index = getBaseIndex(I->start);
   unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
@@ -931,12 +952,12 @@
 
     MachineInstr *MI = getInstructionFromIndex(index);
     MachineBasicBlock *MBB = MI->getParent();
-    NewVReg = 0;
+    unsigned ThisVReg = 0;
     if (TrySplit) {
       std::map<unsigned,unsigned>::const_iterator NVI =
         MBBVRegsMap.find(MBB->getNumber());
       if (NVI != MBBVRegsMap.end()) {
-        NewVReg = NVI->second;
+        ThisVReg = NVI->second;
         // One common case:
         // x = use
         // ...
@@ -959,21 +980,35 @@
         }
         if (MIHasDef && !MIHasUse) {
           MBBVRegsMap.erase(MBB->getNumber());
-          NewVReg = 0;
+          ThisVReg = 0;
         }
       }
     }
-    bool IsNew = NewVReg == 0;
+
+    bool IsNew = ThisVReg == 0;
+    if (IsNew) {
+      // This ends the previous live interval. If all of its def / use
+      // can be folded, give it a low spill weight.
+      if (NewVReg && TrySplit && AllCanFold) {
+        LiveInterval &nI = getOrCreateInterval(NewVReg);
+        nI.weight /= 10.0F;
+      }
+      AllCanFold = true;
+    }
+    NewVReg = ThisVReg;
+
     bool HasDef = false;
     bool HasUse = false;
-    rewriteInstructionForSpills(li, TrySplit, I->valno->id, index, end,
-                                MI, ReMatOrigDefMI, ReMatDefMI, Slot, LdSlot,
-                                isLoad, isLoadSS, DefIsReMat, CanDelete, vrm,
-                                RegMap, rc, ReMatIds, NewVReg, HasDef, HasUse,
-                                loopInfo, MBBVRegsMap, NewLIs);
+    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
+                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
+                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+                                CanDelete, vrm, RegMap, rc, ReMatIds, NewVReg,
+                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
     if (!HasDef && !HasUse)
       continue;
 
+    AllCanFold &= CanFold;
+
     // Update weight of spill interval.
     LiveInterval &nI = getOrCreateInterval(NewVReg);
     if (!TrySplit) {
@@ -1058,6 +1093,12 @@
     unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
     nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
   }
+
+  if (NewVReg && TrySplit && AllCanFold) {
+    // If all of its def / use can be folded, give it a low spill weight.
+    LiveInterval &nI = getOrCreateInterval(NewVReg);
+    nI.weight /= 10.0F;
+  }
 }
 
 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
@@ -1331,8 +1372,14 @@
       // load / rematerialization for us.
       if (Folded)
         nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
-      else
+      else {
         vrm.addRestorePoint(VReg, MI);
+        LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
+        MachineInstr *LastUse = getInstructionFromIndex(getBaseIndex(LR->end));
+        int UseIdx = LastUse->findRegisterUseOperandIdx(VReg);
+        assert(UseIdx != -1);
+        LastUse->getOperand(UseIdx).setIsKill();
+      }
     }
     Id = RestoreMBBs.find_next(Id);
   }

Modified: llvm/trunk/lib/CodeGen/VirtRegMap.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/VirtRegMap.cpp?rev=44601&r1=44600&r2=44601&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/VirtRegMap.cpp (original)
+++ llvm/trunk/lib/CodeGen/VirtRegMap.cpp Tue Dec  4 21:22:34 2007
@@ -1295,8 +1295,6 @@
           // the value and there isn't an earlier def that has already clobbered the
           // physreg.
           if (PhysReg &&
-              Spills.canClobberPhysReg(SS) &&
-              !ReusedOperands.isClobbered(PhysReg) &&
               DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
               MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
             MBB.insert(MII, NewMIs[0]);





More information about the llvm-commits mailing list