[llvm-commits] [llvm] r44517 - in /llvm/trunk: include/llvm/CodeGen/ include/llvm/Target/ lib/CodeGen/ lib/Target/ARM/ lib/Target/Alpha/ lib/Target/Mips/ lib/Target/PowerPC/ lib/Target/Sparc/ lib/Target/X86/

Evan Cheng evan.cheng at apple.com
Sun Dec 2 00:30:39 PST 2007


Author: evancheng
Date: Sun Dec  2 02:30:39 2007
New Revision: 44517

URL: http://llvm.org/viewvc/llvm-project?rev=44517&view=rev
Log:
Remove redundant foldMemoryOperand variants and other code cleanup.

Modified:
    llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h
    llvm/trunk/include/llvm/Target/MRegisterInfo.h
    llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
    llvm/trunk/lib/CodeGen/RegAllocBigBlock.cpp
    llvm/trunk/lib/CodeGen/RegAllocLocal.cpp
    llvm/trunk/lib/CodeGen/VirtRegMap.cpp
    llvm/trunk/lib/CodeGen/VirtRegMap.h
    llvm/trunk/lib/Target/ARM/ARMRegisterInfo.cpp
    llvm/trunk/lib/Target/ARM/ARMRegisterInfo.h
    llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.cpp
    llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.h
    llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp
    llvm/trunk/lib/Target/Mips/MipsRegisterInfo.h
    llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp
    llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.h
    llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.cpp
    llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.h
    llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
    llvm/trunk/lib/Target/X86/X86RegisterInfo.h

Modified: llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h (original)
+++ llvm/trunk/include/llvm/CodeGen/LiveIntervalAnalysis.h Sun Dec  2 02:30:39 2007
@@ -275,8 +275,7 @@
     /// returns true.
     bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
                               MachineInstr *DefMI, unsigned InstrIdx,
-                              unsigned OpIdx,
-                              SmallVector<unsigned, 2> &UseOps,
+                              SmallVector<unsigned, 2> &Ops,
                               bool isSS, int Slot, unsigned Reg);
 
     /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified

Modified: llvm/trunk/include/llvm/Target/MRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/MRegisterInfo.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/include/llvm/Target/MRegisterInfo.h (original)
+++ llvm/trunk/include/llvm/Target/MRegisterInfo.h Sun Dec  2 02:30:39 2007
@@ -533,20 +533,13 @@
                              const MachineInstr *Orig) const = 0;
 
   /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
-  /// slot into the specified machine instruction for the specified operand.  If
-  /// this is possible, a new instruction is returned with the specified operand
-  /// folded, otherwise NULL is returned. The client is responsible for removing
-  /// the old instruction and adding the new one in the instruction stream
+  /// slot into the specified machine instruction for the specified operand(s).
+  /// If this is possible, a new instruction is returned with the specified
+  /// operand folded, otherwise NULL is returned. The client is responsible for
+  /// removing the old instruction and adding the new one in the instruction
+  /// stream.
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const {
     return 0;
   }
@@ -555,15 +548,7 @@
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }

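The four virtual hooks (single-operand and multi-operand, for a stack slot and for an arbitrary load) collapse into two that take a SmallVectorImpl<unsigned> of operand indices, so call sites that used to pass a bare operand index now wrap it in a small vector first. A minimal standalone sketch of the new calling convention, using std::vector as a stand-in for SmallVectorImpl and a dummy MachineInstr type (not the real LLVM classes):

#include <cstdio>
#include <vector>

struct MachineInstr {};  // opaque stand-in, not LLVM's class

// New-style hook: one entry point covers both the single- and the
// multi-operand fold.  Most targets only handle one operand and bail out
// otherwise, much like the updated ARM/Alpha/Mips/PPC/Sparc overrides.
MachineInstr *foldMemoryOperand(MachineInstr *MI,
                                const std::vector<unsigned> &Ops,
                                int FrameIndex) {
  if (Ops.size() != 1)
    return 0;
  std::printf("fold operand %u into stack slot fi#%d\n", Ops[0], FrameIndex);
  return MI;  // pretend a folded instruction was created
}

int main() {
  MachineInstr MI;
  unsigned OpNum = 0;          // what old call sites passed directly
  std::vector<unsigned> Ops;
  Ops.push_back(OpNum);        // old single-index callers now do this
  foldMemoryOperand(&MI, Ops, /*FrameIndex=*/3);
  return 0;
}
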
Modified: llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp Sun Dec  2 02:30:39 2007
@@ -643,28 +643,32 @@
 /// returns true.
 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                          VirtRegMap &vrm, MachineInstr *DefMI,
-                                         unsigned InstrIdx, unsigned OpIdx,
-                                         SmallVector<unsigned, 2> &UseOps,
+                                         unsigned InstrIdx,
+                                         SmallVector<unsigned, 2> &Ops,
                                          bool isSS, int Slot, unsigned Reg) {
-  // FIXME: fold subreg use
-  if (MI->getOperand(OpIdx).getSubReg())
-    return false;
-
-  MachineInstr *fmi = NULL;
-
-  if (UseOps.size() < 2)
-    fmi = isSS ? mri_->foldMemoryOperand(MI, OpIdx, Slot)
-               : mri_->foldMemoryOperand(MI, OpIdx, DefMI);
-  else {
-    if (OpIdx != UseOps[0])
-      // Must be two-address instruction + one more use. Not going to fold.
+  unsigned MRInfo = 0;
+  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
+  SmallVector<unsigned, 2> FoldOps;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+    unsigned OpIdx = Ops[i];
+    // FIXME: fold subreg use.
+    if (MI->getOperand(OpIdx).getSubReg())
       return false;
-    // It may be possible to fold load when there are multiple uses.
-    // e.g. On x86, TEST32rr r, r -> CMP32rm [mem], 0
-    fmi = isSS ? mri_->foldMemoryOperand(MI, UseOps, Slot)
-               : mri_->foldMemoryOperand(MI, UseOps, DefMI);
+    if (MI->getOperand(OpIdx).isDef())
+      MRInfo |= (unsigned)VirtRegMap::isMod;
+    else {
+      // Filter out two-address use operand(s).
+      if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+        MRInfo = VirtRegMap::isModRef;
+        continue;
+      }
+      MRInfo |= (unsigned)VirtRegMap::isRef;
+    }
+    FoldOps.push_back(OpIdx);
   }
 
+  MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
+                           : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
   if (fmi) {
     // Attempt to fold the memory reference into the instruction. If
     // we can do this, we don't need to insert spill code.
@@ -674,7 +678,7 @@
       LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
     MachineBasicBlock &MBB = *MI->getParent();
     if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
-      vrm.virtFolded(Reg, MI, OpIdx, fmi);
+      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
     vrm.transferSpillPts(MI, fmi);
     vrm.transferRestorePts(MI, fmi);
     mi2iMap_.erase(MI);
@@ -775,28 +779,25 @@
 
     HasUse = mop.isUse();
     HasDef = mop.isDef();
-    SmallVector<unsigned, 2> UseOps;
-    if (HasUse)
-      UseOps.push_back(i);
-    std::vector<unsigned> UpdateOps;
+    SmallVector<unsigned, 2> Ops;
+    Ops.push_back(i);
     for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
-      if (!MI->getOperand(j).isRegister())
+      const MachineOperand &MOj = MI->getOperand(j);
+      if (!MOj.isRegister())
         continue;
-      unsigned RegJ = MI->getOperand(j).getReg();
+      unsigned RegJ = MOj.getReg();
       if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
         continue;
       if (RegJ == RegI) {
-        UpdateOps.push_back(j);
-        if (MI->getOperand(j).isUse())
-          UseOps.push_back(j);
-        HasUse |= MI->getOperand(j).isUse();
-        HasDef |= MI->getOperand(j).isDef();
+        Ops.push_back(j);
+        HasUse |= MOj.isUse();
+        HasDef |= MOj.isDef();
       }
     }
 
     if (TryFold &&
-        tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, i,
-                             UseOps, FoldSS, FoldSlot, Reg)) {
+        tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                             Ops, FoldSS, FoldSlot, Reg)) {
       // Folding the load/store can completely change the instruction in
       // unpredictable ways, rescan it from the beginning.
       HasUse = false;
@@ -814,8 +815,8 @@
     mop.setReg(NewVReg);
 
     // Reuse NewVReg for other reads.
-    for (unsigned j = 0, e = UpdateOps.size(); j != e; ++j)
-      MI->getOperand(UpdateOps[j]).setReg(NewVReg);
+    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
+      MI->getOperand(Ops[j]).setReg(NewVReg);
             
     if (CreatedNewVReg) {
       if (DefIsReMat) {
@@ -1226,7 +1227,7 @@
   if (!TrySplit)
     return NewLIs;
 
-  SmallVector<unsigned, 2> UseOps;
+  SmallVector<unsigned, 2> Ops;
   if (NeedStackSlot) {
     int Id = SpillMBBs.find_first();
     while (Id != -1) {
@@ -1236,41 +1237,43 @@
         unsigned VReg = spills[i].vreg;
         bool isReMat = vrm.isReMaterialized(VReg);
         MachineInstr *MI = getInstructionFromIndex(index);
-        int OpIdx = -1;
-        UseOps.clear();
+        bool CanFold = false;
+        bool FoundUse = false;
+        Ops.clear();
         if (spills[i].canFold) {
+          CanFold = true;
           for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
             MachineOperand &MO = MI->getOperand(j);
             if (!MO.isRegister() || MO.getReg() != VReg)
               continue;
-            if (MO.isDef()) {
-              OpIdx = (int)j;
+
+            Ops.push_back(j);
+            if (MO.isDef())
               continue;
-            }
-            // Can't fold if it's two-address code and the use isn't the
-            // first and only use.
-            if (isReMat ||
-                (UseOps.empty() && !alsoFoldARestore(Id, index, VReg,
-                                                  RestoreMBBs, RestoreIdxes))) {
-              OpIdx = -1;
+            if (isReMat || 
+                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
+                                                RestoreMBBs, RestoreIdxes))) {
+              // MI has two-address uses of the same register. If the use
+              // isn't the first and only use in the BB, then we can't fold
+              // it. FIXME: Move this to rewriteInstructionsForSpills.
+              CanFold = false;
               break;
             }
-            UseOps.push_back(j);
+            FoundUse = true;
           }
         }
         // Fold the store into the def if possible.
         bool Folded = false;
-        if (OpIdx != -1) {
-          if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
-                                   true, Slot, VReg)) {
-            if (!UseOps.empty())
-              // Folded a two-address instruction, do not issue a load.
-              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
+        if (CanFold && !Ops.empty()) {
+          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
             Folded = true;
+            if (FoundUse > 0)
+              // Also folded uses, do not issue a load.
+              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
           }
         }
 
-        // Else tell the spiller to issue a store for us.
+        // Else tell the spiller to issue a spill.
         if (!Folded)
           vrm.addSpillPoint(VReg, MI);
       }
@@ -1287,41 +1290,40 @@
         continue;
       unsigned VReg = restores[i].vreg;
       MachineInstr *MI = getInstructionFromIndex(index);
-      int OpIdx = -1;
-      UseOps.clear();
+      bool CanFold = false;
+      Ops.clear();
       if (restores[i].canFold) {
+        CanFold = true;
         for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
           MachineOperand &MO = MI->getOperand(j);
           if (!MO.isRegister() || MO.getReg() != VReg)
             continue;
+
           if (MO.isDef()) {
-            // Can't fold if it's two-address code and it hasn't already
-            // been folded.
-            OpIdx = -1;
+            // If this restore were to be folded, it would have been folded
+            // already.
+            CanFold = false;
             break;
           }
-          if (UseOps.empty())
-            // Use the first use index.
-            OpIdx = (int)j;
-          UseOps.push_back(j);
+          Ops.push_back(j);
         }
       }
 
       // Fold the load into the use if possible.
       bool Folded = false;
-      if (OpIdx != -1) {
-        if (vrm.isReMaterialized(VReg)) {
+      if (CanFold && !Ops.empty()) {
+        if (!vrm.isReMaterialized(VReg))
+          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
+        else {
           MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
           int LdSlot = 0;
           bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
           // If the rematerializable def is a load, also try to fold it.
           if (isLoadSS ||
               (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
-            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
-                                          UseOps, isLoadSS, LdSlot, VReg);
-        } else
-          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
-                                        true, Slot, VReg);
+            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                                          Ops, isLoadSS, LdSlot, VReg);
+        }
       }
       // If folding is not possible / failed, then tell the spiller to issue a
       // load / rematerialization for us.

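tryFoldMemoryOperand now scans all candidate operand indices itself: it gives up on sub-register operands, skips tied (two-address) uses, and accumulates the mod/ref information that is later handed to VirtRegMap::virtFolded. A standalone sketch of that classification loop, using a simplified operand model rather than LLVM's MachineOperand:

#include <vector>

enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };

struct Operand {
  bool IsDef;      // operand writes the register
  bool IsTiedUse;  // use tied to a def (two-address constraint)
  bool HasSubReg;  // sub-register reference
};

// Returns false if folding must be abandoned.  Otherwise FoldOps receives the
// operand indices to pass to the target hook and MRInfo the combined mask.
bool classifyFoldOps(const std::vector<Operand> &Ops,
                     std::vector<unsigned> &FoldOps, unsigned &MRInfo) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    if (Ops[i].HasSubReg)
      return false;              // folding sub-register uses is still a FIXME
    if (Ops[i].IsDef)
      MRInfo |= (unsigned)isMod;
    else if (Ops[i].IsTiedUse) {
      MRInfo = isModRef;         // two-address use: counts as mod+ref,
      continue;                  // but is not offered to the target
    } else
      MRInfo |= (unsigned)isRef;
    FoldOps.push_back(i);
  }
  return true;
}
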
Modified: llvm/trunk/lib/CodeGen/RegAllocBigBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocBigBlock.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/RegAllocBigBlock.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegAllocBigBlock.cpp Sun Dec  2 02:30:39 2007
@@ -520,7 +520,9 @@
     assignVirtToPhysReg(VirtReg, PhysReg);
   } else {  // no free registers available.
     // try to fold the spill into the instruction
-    if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)) {
+    SmallVector<unsigned, 2> Ops;
+    Ops.push_back(OpNum);
+    if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
       ++NumFolded;
       // Since we changed the address of MI, make sure to update live variables
       // to know that the new instruction has the properties of the old one.

Modified: llvm/trunk/lib/CodeGen/RegAllocLocal.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocLocal.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/RegAllocLocal.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegAllocLocal.cpp Sun Dec  2 02:30:39 2007
@@ -473,7 +473,9 @@
     assignVirtToPhysReg(VirtReg, PhysReg);
   } else {         // No registers available.
     // If we can fold this spill into this instruction, do so now.
-    if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)){
+    SmallVector<unsigned, 2> Ops;
+    Ops.push_back(OpNum);
+    if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
       ++NumFolded;
       // Since we changed the address of MI, make sure to update live variables
       // to know that the new instruction has the properties of the old one.

Modified: llvm/trunk/lib/CodeGen/VirtRegMap.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/VirtRegMap.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/VirtRegMap.cpp (original)
+++ llvm/trunk/lib/CodeGen/VirtRegMap.cpp Sun Dec  2 02:30:39 2007
@@ -115,7 +115,7 @@
 }
 
 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
-                            unsigned OpNo, MachineInstr *NewMI) {
+                            MachineInstr *NewMI, ModRef MRInfo) {
   // Move previous memory references folded to new instruction.
   MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
   for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
@@ -124,18 +124,6 @@
     MI2VirtMap.erase(I++);
   }
 
-  ModRef MRInfo;
-  const TargetInstrDescriptor *TID = OldMI->getInstrDescriptor();
-  if (TID->getOperandConstraint(OpNo, TOI::TIED_TO) != -1 ||
-      TID->findTiedToSrcOperand(OpNo) != -1) {
-    // Folded a two-address operand.
-    MRInfo = isModRef;
-  } else if (OldMI->getOperand(OpNo).isDef()) {
-    MRInfo = isMod;
-  } else {
-    MRInfo = isRef;
-  }
-
   // add new memory reference
   MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
 }
@@ -830,7 +818,9 @@
       NewMIs.clear();
       int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
       assert(Idx != -1);
-      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Idx, SS);
+      SmallVector<unsigned, 2> Ops;
+      Ops.push_back(Idx);
+      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Ops, SS);
       if (FoldedMI) {
         if (!VRM.hasPhys(UnfoldVR))
           VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);

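virtFolded no longer inspects OldMI's operand to work out whether the folded reference is a load, a store, or both; the caller passes in the ModRef value it already computed. A standalone sketch of the remaining bookkeeping, assuming a plain std::multimap in place of MI2VirtMap:

#include <map>
#include <utility>

enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };
struct MachineInstr {};

typedef std::multimap<MachineInstr*, std::pair<unsigned, ModRef> > MI2VirtMapTy;

void virtFolded(MI2VirtMapTy &MI2VirtMap, unsigned VirtReg,
                MachineInstr *OldMI, MachineInstr *NewMI, ModRef MRInfo) {
  // Move memory references previously recorded against OldMI over to NewMI.
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
       E = MI2VirtMap.upper_bound(OldMI); I != E; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }
  // Record the new reference with the caller-supplied mod/ref information.
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}
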
Modified: llvm/trunk/lib/CodeGen/VirtRegMap.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/VirtRegMap.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/VirtRegMap.h (original)
+++ llvm/trunk/lib/CodeGen/VirtRegMap.h Sun Dec  2 02:30:39 2007
@@ -280,10 +280,9 @@
     }
 
     /// @brief Updates information about the specified virtual register's value
-    /// folded into newMI machine instruction.  The OpNum argument indicates the
-    /// operand number of OldMI that is folded.
-    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, unsigned OpNum,
-                    MachineInstr *NewMI);
+    /// folded into newMI machine instruction.
+    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
+                    ModRef MRInfo);
 
     /// @brief Updates information about the specified virtual register's value
     /// folded into the specified machine instruction.

Modified: llvm/trunk/lib/Target/ARM/ARMRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMRegisterInfo.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMRegisterInfo.cpp Sun Dec  2 02:30:39 2007
@@ -347,7 +347,11 @@
 }
 
 MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 unsigned OpNum, int FI) const {
+                                                 SmallVectorImpl<unsigned> &Ops,
+                                                 int FI) const {
+  if (Ops.size() != 1) return NULL;
+
+  unsigned OpNum = Ops[0];
   unsigned Opc = MI->getOpcode();
   MachineInstr *NewMI = NULL;
   switch (Opc) {

Modified: llvm/trunk/lib/Target/ARM/ARMRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMRegisterInfo.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMRegisterInfo.h (original)
+++ llvm/trunk/lib/Target/ARM/ARMRegisterInfo.h Sun Dec  2 02:30:39 2007
@@ -74,22 +74,12 @@
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                      unsigned DestReg, const MachineInstr *Orig) const;
 
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  int FrameIndex) const;
-
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
+                                  SmallVectorImpl<unsigned> &Ops,
+                                  int FrameIndex) const;
 
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }

Modified: llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.cpp Sun Dec  2 02:30:39 2007
@@ -153,8 +153,10 @@
 }
 
 MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 unsigned OpNum,
+                                                 SmallVectorImpl<unsigned> &Ops,
                                                  int FrameIndex) const {
+   if (Ops.size() != 1) return NULL;
+
    // Make sure this is a reg-reg copy.
    unsigned Opc = MI->getOpcode();
 
@@ -166,7 +168,7 @@
    case Alpha::CPYSS:
    case Alpha::CPYST:
      if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
-       if (OpNum == 0) {  // move -> store
+       if (Ops[0] == 0) {  // move -> store
          unsigned InReg = MI->getOperand(1).getReg();
          Opc = (Opc == Alpha::BISr) ? Alpha::STQ : 
            ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);

Modified: llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.h (original)
+++ llvm/trunk/lib/Target/Alpha/AlphaRegisterInfo.h Sun Dec  2 02:30:39 2007
@@ -48,22 +48,12 @@
                        const TargetRegisterClass *RC,
                        SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  MachineInstr* foldMemoryOperand(MachineInstr *MI, unsigned OpNum, 
-                                  int FrameIndex) const;
-
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
+                                  SmallVectorImpl<unsigned> &Ops,
+                                  int FrameIndex) const;
 
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }

Modified: llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp Sun Dec  2 02:30:39 2007
@@ -176,8 +176,11 @@
 }
 
 MachineInstr *MipsRegisterInfo::
-foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const 
+foldMemoryOperand(MachineInstr* MI,
+                  SmallVectorImpl<unsigned> &Ops, int FI) const 
 {
+  if (Ops.size() != 1) return NULL;
+
   MachineInstr *NewMI = NULL;
 
   switch (MI->getOpcode()) 
@@ -188,10 +191,10 @@
         (MI->getOperand(1).getReg() == Mips::ZERO) &&
         (MI->getOperand(2).isRegister())) 
       {
-        if (OpNum == 0)    // COPY -> STORE
+        if (Ops[0] == 0)    // COPY -> STORE
           NewMI = BuildMI(TII.get(Mips::SW)).addFrameIndex(FI)
                   .addImm(0).addReg(MI->getOperand(2).getReg());
-        else               // COPY -> LOAD
+        else                   // COPY -> LOAD
           NewMI = BuildMI(TII.get(Mips::LW), MI->getOperand(0)
                   .getReg()).addImm(0).addFrameIndex(FI);
       }

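Like the other RISC targets touched here, Mips can only fold a spill into a register-to-register copy, turning it into a store when the copy's destination is the spilled operand and into a load when a source is. A simplified standalone sketch of that decision; the operand layout and printed mnemonics are illustrative stand-ins, not the real target code:

#include <cstdio>
#include <vector>

struct CopyInst { unsigned DstReg, SrcReg; };

enum FoldResult { NoFold, StoreToSlot, LoadFromSlot };

FoldResult foldCopy(const CopyInst &C, const std::vector<unsigned> &Ops,
                    int FI) {
  if (Ops.size() != 1)
    return NoFold;               // these targets only handle a single operand
  if (Ops[0] == 0) {             // destination is spilled: copy -> store
    std::printf("SW r%u, 0(fi#%d)\n", C.SrcReg, FI);
    return StoreToSlot;
  }
  // a source operand is spilled: copy -> load
  std::printf("LW r%u, 0(fi#%d)\n", C.DstReg, FI);
  return LoadFromSlot;
}
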
Modified: llvm/trunk/lib/Target/Mips/MipsRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsRegisterInfo.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsRegisterInfo.h (original)
+++ llvm/trunk/lib/Target/Mips/MipsRegisterInfo.h Sun Dec  2 02:30:39 2007
@@ -55,22 +55,12 @@
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                      unsigned DestReg, const MachineInstr *Orig) const;
 
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  int FrameIndex) const;
-
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
+                                  SmallVectorImpl<unsigned> &Ops,
+                                  int FrameIndex) const;
 
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }

Modified: llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp Sun Dec  2 02:30:39 2007
@@ -555,11 +555,14 @@
 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
 /// copy instructions, turning them into load/store instructions.
 MachineInstr *PPCRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 unsigned OpNum,
-                                                 int FrameIndex) const {
+                                              SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const {
+  if (Ops.size() != 1) return NULL;
+
   // Make sure this is a reg-reg copy.  Note that we can't handle MCRF, because
   // it takes more than one instruction to store it.
   unsigned Opc = MI->getOpcode();
+  unsigned OpNum = Ops[0];
 
   MachineInstr *NewMI = NULL;
   if ((Opc == PPC::OR &&

Modified: llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.h (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.h Sun Dec  2 02:30:39 2007
@@ -65,22 +65,12 @@
 
   /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
   /// copy instructions, turning them into load/store instructions.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                          int FrameIndex) const;
-  
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
+                                          SmallVectorImpl<unsigned> &Ops,
+                                          int FrameIndex) const;
 
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }

Modified: llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.cpp Sun Dec  2 02:30:39 2007
@@ -148,8 +148,11 @@
 }
 
 MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
-                                                   unsigned OpNum,
-                                                   int FI) const {
+                                                 SmallVectorImpl<unsigned> &Ops,
+                                                 int FI) const {
+  if (Ops.size() != 1) return NULL;
+
+  unsigned OpNum = Ops[0];
   bool isFloat = false;
   MachineInstr *NewMI = NULL;
   switch (MI->getOpcode()) {

Modified: llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.h (original)
+++ llvm/trunk/lib/Target/Sparc/SparcRegisterInfo.h Sun Dec  2 02:30:39 2007
@@ -59,23 +59,11 @@
                      unsigned DestReg, const MachineInstr *Orig) const;
 
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
 
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }

Modified: llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp Sun Dec  2 02:30:39 2007
@@ -1140,73 +1140,58 @@
 }
 
 
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
-                                                 int FrameIndex) const {
-  // Check switch flag 
-  if (NoFusing) return NULL;
-  SmallVector<MachineOperand,4> MOs;
-  MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
-  return foldMemoryOperand(MI, OpNum, MOs);
-}
-
 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                              SmallVectorImpl<unsigned> &UseOps,
+                                              SmallVectorImpl<unsigned> &Ops,
                                               int FrameIndex) const {
   // Check switch flag 
   if (NoFusing) return NULL;
 
-  if (UseOps.size() == 1)
-    return foldMemoryOperand(MI, UseOps[0], FrameIndex);
-  else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+    unsigned NewOpc = 0;
+    switch (MI->getOpcode()) {
+    default: return NULL;
+    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+    }
+    // Change to CMPXXri r, 0 first.
+    MI->setInstrDescriptor(TII.get(NewOpc));
+    MI->getOperand(1).ChangeToImmediate(0);
+  } else if (Ops.size() != 1)
     return NULL;
 
-  unsigned NewOpc = 0;
-  switch (MI->getOpcode()) {
-  default: return NULL;
-  case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
-  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
-  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
-  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
-  }
-  // Change to CMPXXri r, 0 first.
-  MI->setInstrDescriptor(TII.get(NewOpc));
-  MI->getOperand(1).ChangeToImmediate(0);
-  return foldMemoryOperand(MI, 0, FrameIndex);
-}
-
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
-                                                 MachineInstr *LoadMI) const {
-  // Check switch flag 
-  if (NoFusing) return NULL;
   SmallVector<MachineOperand,4> MOs;
-  unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
-  for (unsigned i = NumOps - 4; i != NumOps; ++i)
-    MOs.push_back(LoadMI->getOperand(i));
-  return foldMemoryOperand(MI, OpNum, MOs);
+  MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
+  return foldMemoryOperand(MI, Ops[0], MOs);
 }
 
 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 SmallVectorImpl<unsigned> &UseOps,
+                                                 SmallVectorImpl<unsigned> &Ops,
                                                  MachineInstr *LoadMI) const {
   // Check switch flag 
   if (NoFusing) return NULL;
 
-  if (UseOps.size() == 1)
-    return foldMemoryOperand(MI, UseOps[0], LoadMI);
-  else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+    unsigned NewOpc = 0;
+    switch (MI->getOpcode()) {
+    default: return NULL;
+    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+    }
+    // Change to CMPXXri r, 0 first.
+    MI->setInstrDescriptor(TII.get(NewOpc));
+    MI->getOperand(1).ChangeToImmediate(0);
+  } else if (Ops.size() != 1)
     return NULL;
-  unsigned NewOpc = 0;
-  switch (MI->getOpcode()) {
-  default: return NULL;
-  case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
-  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
-  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
-  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
-  }
-  // Change to CMPXXri r, 0 first.
-  MI->setInstrDescriptor(TII.get(NewOpc));
-  MI->getOperand(1).ChangeToImmediate(0);
-  return foldMemoryOperand(MI, 0, LoadMI);
+
+  SmallVector<MachineOperand,4> MOs;
+  unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
+  for (unsigned i = NumOps - 4; i != NumOps; ++i)
+    MOs.push_back(LoadMI->getOperand(i));
+  return foldMemoryOperand(MI, Ops[0], MOs);
 }
 
 

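X86 keeps the special case for an instruction whose two uses of the spilled register are operands 0 and 1: a TESTrr of a register against itself is first rewritten to the corresponding CMPri against 0, after which only operand 0 needs to be fused with the memory reference. A standalone sketch of that decision, using a simplified opcode model rather than the real X86RegisterInfo code:

#include <cstdio>
#include <vector>

enum Opcode { TEST32rr, CMP32ri, OTHER };

struct Inst { Opcode Opc; };

// Returns the operand index to fuse with the memory reference, or -1 if the
// fold is not possible.
int prepareFold(Inst &MI, const std::vector<unsigned> &Ops) {
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if (MI.Opc != TEST32rr)
      return -1;                 // only the TESTrr pattern handles two uses
    MI.Opc = CMP32ri;            // TEST32rr r, r  ->  CMP32ri r, 0
    return 0;                    // operand 0 then receives the memory operand
  }
  if (Ops.size() != 1)
    return -1;
  return (int)Ops[0];
}

int main() {
  Inst MI = { TEST32rr };
  std::vector<unsigned> Ops;
  Ops.push_back(0);
  Ops.push_back(1);
  std::printf("fuse memory into operand %d\n", prepareFold(MI, Ops));
  return 0;
}
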
Modified: llvm/trunk/lib/Target/X86/X86RegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RegisterInfo.h?rev=44517&r1=44516&r2=44517&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86RegisterInfo.h Sun Dec  2 02:30:39 2007
@@ -133,32 +133,19 @@
 
   /// foldMemoryOperand - If this target supports it, fold a load or store of
   /// the specified stack slot into the specified machine instruction for the
-  /// specified operand.  If this is possible, the target should perform the
+  /// specified operand(s).  If this is possible, the target should perform the
   /// folding and return true, otherwise it should return false.  If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  unsigned OpNum,
-                                  int FrameIndex) const;
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  unsigned OpNum,
-                                  MachineInstr* LoadMI) const;
-
-  /// foldMemoryOperand - Same as the previous version except it allows folding
-  /// of any load and store from / to any address, not just from a specific
-  /// stack slot.
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const;
 
   /// getOpcodeAfterMemoryFold - Returns the opcode of the would be new




