[llvm] r279124 - RegScavenging: Add scavengeRegisterBackwards()

Matthias Braun via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 18 12:47:59 PDT 2016


Author: matze
Date: Thu Aug 18 14:47:59 2016
New Revision: 279124

URL: http://llvm.org/viewvc/llvm-project?rev=279124&view=rev
Log:
RegScavenging: Add scavengeRegisterBackwards()

Re-apply r276044 with an off-by-one instruction fix for the reload placement.

This is a variant of scavengeRegister() that works for
enterBasicBlockEnd()/backward(). The benefit of the backward mode is
that it is not affected by incomplete kill flags.

This patch also changes
PrologEpilogInserter::doScavengeFrameVirtualRegs() to use the register
scavenger in backwards mode.

Differential Revision: http://reviews.llvm.org/D21885
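
For reference, the intended usage pattern of the new API (distilled from the
doScavengeFrameVirtualRegs() changes below) looks roughly like the sketch
that follows; walkBlockBackwards(), needsScratchRegFor() and
rewriteScratchUses() are illustrative placeholders, not part of this patch:

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/RegisterScavenging.h"
  using namespace llvm;

  // Sketch only: scavenge a scratch register while walking a block backwards.
  // Backward mode never consults kill flags, so incomplete flags are harmless.
  static void walkBlockBackwards(MachineBasicBlock &MBB, RegScavenger &RS,
                                 const TargetRegisterClass &RC) {
    RS.enterBasicBlockEnd(MBB);
    for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin();) {
      --I;
      // Position the scavenger between *I and *std::next(I).
      RS.backward(I);
      if (!needsScratchRegFor(*I))   // hypothetical predicate
        continue;
      // Make a register of RC free from the current position back to just
      // before *I; an emergency spill/reload is inserted when nothing is free.
      int SPAdj = 0;
      unsigned Scratch =
          RS.scavengeRegisterBackwards(RC, I, /*RestoreAfter=*/false, SPAdj);
      rewriteScratchUses(*I, Scratch); // hypothetical rewrite step
    }
  }

This mirrors the new loop in doScavengeFrameVirtualRegs(); the key point is
that liveness is tracked from defs and uses alone, so missing kill flags
cannot hide free registers the way they can in the forward mode.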

Modified:
    llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h
    llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp
    llvm/trunk/lib/CodeGen/RegisterScavenging.cpp
    llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll
    llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
    llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll
    llvm/trunk/test/CodeGen/Thumb/large-stack.ll

Modified: llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h?rev=279124&r1=279123&r2=279124&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h (original)
+++ llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h Thu Aug 18 14:47:59 2016
@@ -157,12 +157,24 @@ public:
   /// available and do the appropriate bookkeeping. SPAdj is the stack
   /// adjustment due to call frame, it's passed along to eliminateFrameIndex().
   /// Returns the scavenged register.
+  /// This function performs worse if kill flags are incomplete; consider using
+  /// scavengeRegisterBackwards() instead!
   unsigned scavengeRegister(const TargetRegisterClass *RegClass,
                             MachineBasicBlock::iterator I, int SPAdj);
   unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
     return scavengeRegister(RegClass, MBBI, SPAdj);
   }
 
+  /// Make a register of the specific register class available from the current
+  /// position backwards to the place before \p To. If \p RestoreAfter is true,
+  /// this includes the instruction following the current position.
+  /// SPAdj is the stack adjustment due to call frame, it's passed along to
+  /// eliminateFrameIndex().
+  /// Returns the scavenged register.
+  unsigned scavengeRegisterBackwards(const TargetRegisterClass &RC,
+                                     MachineBasicBlock::iterator To,
+                                     bool RestoreAfter, int SPAdj);
+
   /// Tell the scavenger a register is used.
   void setRegUsed(unsigned Reg, LaneBitmask LaneMask = ~0u);
 private:
@@ -202,6 +214,12 @@ private:
 
   /// Mark live-in registers of basic block as used.
   void setLiveInsUsed(const MachineBasicBlock &MBB);
+
+  /// Spill a register after position \p After and reload it before position
+  /// \p UseMI.
+  ScavengedInfo &spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj,
+                       MachineBasicBlock::iterator After,
+                       MachineBasicBlock::iterator &UseMI);
 };
 
 } // End llvm namespace

Modified: llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp?rev=279124&r1=279123&r2=279124&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp (original)
+++ llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp Thu Aug 18 14:47:59 2016
@@ -41,6 +41,7 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
+#include <algorithm>
 #include <climits>
 
 using namespace llvm;
@@ -1145,6 +1146,55 @@ void PEI::replaceFrameIndices(MachineBas
   }
 }
 
+/// Allocate a register for the virtual register \p VReg. The last use of
+/// \p VReg is around the current position of the register scavenger \p RS.
+/// \p ReserveAfter controls whether the scavenged register needs to be reserved
+/// after the current instruction; otherwise, it will only be reserved before
+/// the current instruction.
+static unsigned scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
+                             unsigned VReg, bool ReserveAfter) {
+#ifndef NDEBUG
+  // Verify that all definitions and uses are in the same basic block.
+  const MachineBasicBlock *CommonMBB = nullptr;
+  bool HadDef = false;
+  for (MachineOperand &MO : MRI.reg_nodbg_operands(VReg)) {
+    MachineBasicBlock *MBB = MO.getParent()->getParent();
+    if (CommonMBB == nullptr)
+      CommonMBB = MBB;
+    assert(MBB == CommonMBB && "All defs+uses must be in the same basic block");
+    if (MO.isDef())
+      HadDef = true;
+  }
+  assert(HadDef && "Must have at least 1 Def");
+#endif
+
+  // We should only have one definition of the register. However, to accommodate
+  // the requirements of two-address code we also allow definitions in
+  // subsequent instructions provided they also read the register. That way
+  // we get a single contiguous lifetime.
+  //
+  // Definitions in MRI.def_begin() are unordered; search for the first.
+  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
+  MachineRegisterInfo::def_iterator FirstDef =
+    std::find_if(MRI.def_begin(VReg), MRI.def_end(),
+                 [VReg, &TRI](const MachineOperand &MO) {
+      return !MO.getParent()->readsRegister(VReg, &TRI);
+    });
+  assert(FirstDef != MRI.def_end() &&
+         "Must have one definition that does not redefine vreg");
+  MachineInstr &DefMI = *FirstDef->getParent();
+
+  // The register scavenger will report a free register, inserting an emergency
+  // spill/reload if necessary.
+  int SPAdj = 0;
+  const TargetRegisterClass &RC = *MRI.getRegClass(VReg);
+  unsigned SReg = RS.scavengeRegisterBackwards(RC, DefMI.getIterator(),
+                                               ReserveAfter, SPAdj);
+  MRI.replaceRegWith(VReg, SReg);
+  ++NumScavengedRegs;
+  return SReg;
+}
+
 /// doScavengeFrameVirtualRegs - Replace all frame index virtual registers
 /// with physical registers. Use the register scavenger to find an
 /// appropriate register to use.
@@ -1156,78 +1206,51 @@ static void
 doScavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger *RS) {
   // Run through the instructions and find any virtual registers.
   MachineRegisterInfo &MRI = MF.getRegInfo();
+  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
   for (MachineBasicBlock &MBB : MF) {
-    RS->enterBasicBlock(MBB);
+    RS->enterBasicBlockEnd(MBB);
 
-    int SPAdj = 0;
-
-    // The instruction stream may change in the loop, so check MBB.end()
-    // directly.
-    for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
-      // We might end up here again with a NULL iterator if we scavenged a
-      // register for which we inserted spill code for definition by what was
-      // originally the first instruction in MBB.
-      if (I == MachineBasicBlock::iterator(nullptr))
-        I = MBB.begin();
+    bool LastIterationHadVRegUses = false;
+    for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin(); ) {
+      --I;
+      // Move RegScavenger to the position between *I and *std::next(I).
+      RS->backward(I);
+
+      // Look for unassigned vregs in the uses of *std::next(I).
+      if (LastIterationHadVRegUses) {
+        MachineBasicBlock::iterator N = std::next(I);
+        const MachineInstr &NMI = *N;
+        for (const MachineOperand &MO : NMI.operands()) {
+          if (!MO.isReg() || !MO.readsReg())
+            continue;
+          unsigned Reg = MO.getReg();
+          if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+            unsigned SReg = scavengeVReg(MRI, *RS, Reg, true);
+            N->addRegisterKilled(SReg, &TRI, false);
+            RS->setRegUsed(SReg);
+          }
+        }
+      }
 
+      // Look for unassigned vregs in the defs of *I.
+      LastIterationHadVRegUses = false;
       const MachineInstr &MI = *I;
-      MachineBasicBlock::iterator J = std::next(I);
-      MachineBasicBlock::iterator P =
-                         I == MBB.begin() ? MachineBasicBlock::iterator(nullptr)
-                                          : std::prev(I);
-
-      // RS should process this instruction before we might scavenge at this
-      // location. This is because we might be replacing a virtual register
-      // defined by this instruction, and if so, registers killed by this
-      // instruction are available, and defined registers are not.
-      RS->forward(I);
-
       for (const MachineOperand &MO : MI.operands()) {
         if (!MO.isReg())
           continue;
         unsigned Reg = MO.getReg();
         if (!TargetRegisterInfo::isVirtualRegister(Reg))
           continue;
-
-        // When we first encounter a new virtual register, it
-        // must be a definition.
-        assert(MO.isDef() && "frame index virtual missing def!");
-        // Scavenge a new scratch register
-        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
-        unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj);
-
-        ++NumScavengedRegs;
-
-        // Replace this reference to the virtual register with the
-        // scratch register.
-        assert(ScratchReg && "Missing scratch register!");
-        MRI.replaceRegWith(Reg, ScratchReg);
-
-        // Because this instruction was processed by the RS before this
-        // register was allocated, make sure that the RS now records the
-        // register as being used.
-        RS->setRegUsed(ScratchReg);
+        // We have to look at all operands anyway, so we can precalculate here
+        // whether there is a reading operand. This allows us to skip the use
+        // step in the next iteration if there was none.
+        if (MO.readsReg())
+          LastIterationHadVRegUses = true;
+        if (MO.isDef()) {
+          unsigned SReg = scavengeVReg(MRI, *RS, Reg, false);
+          I->addRegisterDead(SReg, &TRI, false);
+        }
       }
-
-      // If the scavenger needed to use one of its spill slots, the
-      // spill code will have been inserted in between I and J. This is a
-      // problem because we need the spill code before I: Move I to just
-      // prior to J.
-      if (I != std::prev(J)) {
-        MBB.splice(J, &MBB, I);
-
-        // Before we move I, we need to prepare the RS to visit I again.
-        // Specifically, RS will assert if it sees uses of registers that
-        // it believes are undefined. Because we have already processed
-        // register kills in I, when it visits I again, it will believe that
-        // those registers are undefined. To avoid this situation, unprocess
-        // the instruction I.
-        assert(RS->getCurrentPosition() == I &&
-          "The register scavenger has an unexpected position");
-        I = P;
-        RS->unprocess(P);
-      } else
-        ++I;
     }
   }
 }

Modified: llvm/trunk/lib/CodeGen/RegisterScavenging.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegisterScavenging.cpp?rev=279124&r1=279123&r2=279124&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegisterScavenging.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegisterScavenging.cpp Thu Aug 18 14:47:59 2016
@@ -299,6 +299,14 @@ void RegScavenger::backward() {
     }
   }
 
+  // Expire scavenge spill frameindex uses.
+  for (ScavengedInfo &I : Scavenged) {
+    if (I.Restore == &MI) {
+      I.Reg = 0;
+      I.Restore = nullptr;
+    }
+  }
+
   if (MBBI == MBB->begin()) {
     MBBI = MachineBasicBlock::iterator(nullptr);
     Tracking = false;
@@ -398,6 +406,69 @@ unsigned RegScavenger::findSurvivorReg(M
   return Survivor;
 }
 
+static std::pair<unsigned, MachineBasicBlock::iterator>
+findSurvivorBackwards(const TargetRegisterInfo &TRI,
+    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
+    BitVector &Available, BitVector &Candidates) {
+  bool FoundTo = false;
+  unsigned Survivor = 0;
+  MachineBasicBlock::iterator Pos;
+  MachineBasicBlock &MBB = *From->getParent();
+  unsigned InstrLimit = 25;
+  unsigned InstrCountDown = InstrLimit;
+  for (MachineBasicBlock::iterator I = From;; --I) {
+    const MachineInstr &MI = *I;
+
+    // Remove any candidates touched by the instruction.
+    bool FoundVReg = false;
+    for (const MachineOperand &MO : MI.operands()) {
+      if (MO.isRegMask()) {
+        Candidates.clearBitsNotInMask(MO.getRegMask());
+        continue;
+      }
+      if (!MO.isReg() || MO.isUndef() || MO.isDebug())
+        continue;
+      unsigned Reg = MO.getReg();
+      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+        FoundVReg = true;
+      } else if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+        for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
+          Candidates.reset(*AI);
+      }
+    }
+
+    if (I == To) {
+      // If one of the available registers survived this long, take it.
+      Available &= Candidates;
+      int Reg = Available.find_first();
+      if (Reg != -1)
+        return std::make_pair(Reg, MBB.end());
+      // Otherwise we will continue up to InstrLimit instructions to find
+      // the register which is not defined/used for the longest time.
+      FoundTo = true;
+      Pos = To;
+    }
+    if (FoundTo) {
+      if (Survivor == 0 || !Candidates.test(Survivor)) {
+        int Reg = Candidates.find_first();
+        if (Reg == -1)
+          break;
+        Survivor = Reg;
+      }
+      if (--InstrCountDown == 0 || I == MBB.begin())
+        break;
+      if (FoundVReg) {
+        // Keep searching when we find a vreg, since the spilled register will
+        // also be useful for this other vreg later.
+        InstrCountDown = InstrLimit;
+        Pos = I;
+      }
+    }
+  }
+
+  return std::make_pair(Survivor, Pos);
+}
+
 static unsigned getFrameIndexOperandNum(MachineInstr &MI) {
   unsigned i = 0;
   while (!MI.getOperand(i).isFI()) {
@@ -407,43 +478,16 @@ static unsigned getFrameIndexOperandNum(
   return i;
 }
 
-unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
-                                        MachineBasicBlock::iterator I,
-                                        int SPAdj) {
-  MachineInstr &MI = *I;
-  const MachineFunction &MF = *MI.getParent()->getParent();
-  // Consider all allocatable registers in the register class initially
-  BitVector Candidates = TRI->getAllocatableSet(MF, RC);
-
-  // Exclude all the registers being used by the instruction.
-  for (const MachineOperand &MO : MI.operands()) {
-    if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
-        !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
-      Candidates.reset(MO.getReg());
-  }
-
-  // Try to find a register that's unused if there is one, as then we won't
-  // have to spill.
-  BitVector Available = getRegsAvailable(RC);
-  Available &= Candidates;
-  if (Available.any())
-    Candidates = Available;
-
-  // Find the register whose use is furthest away.
-  MachineBasicBlock::iterator UseMI;
-  unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
-
-  // If we found an unused register there is no reason to spill it.
-  if (!isRegUsed(SReg)) {
-    DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
-    return SReg;
-  }
-
+RegScavenger::ScavengedInfo &
+RegScavenger::spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj,
+                    MachineBasicBlock::iterator Before,
+                    MachineBasicBlock::iterator &UseMI) {
   // Find an available scavenging slot with size and alignment matching
   // the requirements of the class RC.
+  const MachineFunction &MF = *Before->getParent()->getParent();
   const MachineFrameInfo &MFI = MF.getFrameInfo();
-  unsigned NeedSize = RC->getSize();
-  unsigned NeedAlign = RC->getAlignment();
+  unsigned NeedSize = RC.getSize();
+  unsigned NeedAlign = RC.getAlignment();
 
   unsigned SI = Scavenged.size(), Diff = UINT_MAX;
   int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
@@ -478,42 +522,110 @@ unsigned RegScavenger::scavengeRegister(
   }
 
   // Avoid infinite regress
-  Scavenged[SI].Reg = SReg;
+  Scavenged[SI].Reg = Reg;
 
   // If the target knows how to save/restore the register, let it do so;
   // otherwise, use the emergency stack spill slot.
-  if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {
-    // Spill the scavenged register before I.
+  if (!TRI->saveScavengerRegister(*MBB, Before, UseMI, &RC, Reg)) {
+    // Spill the scavenged register before \p Before.
     int FI = Scavenged[SI].FrameIndex;
     if (FI < FIB || FI >= FIE) {
       std::string Msg = std::string("Error while trying to spill ") +
-          TRI->getName(SReg) + " from class " + TRI->getRegClassName(RC) +
+          TRI->getName(Reg) + " from class " + TRI->getRegClassName(&RC) +
           ": Cannot scavenge register without an emergency spill slot!";
       report_fatal_error(Msg.c_str());
     }
-    TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex,
-                             RC, TRI);
-    MachineBasicBlock::iterator II = std::prev(I);
+    TII->storeRegToStackSlot(*MBB, Before, Reg, true, Scavenged[SI].FrameIndex,
+                             &RC, TRI);
+    MachineBasicBlock::iterator II = std::prev(Before);
 
     unsigned FIOperandNum = getFrameIndexOperandNum(*II);
     TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
 
     // Restore the scavenged register before its use (or first terminator).
-    TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
-                              RC, TRI);
+    TII->loadRegFromStackSlot(*MBB, UseMI, Reg, Scavenged[SI].FrameIndex,
+                              &RC, TRI);
     II = std::prev(UseMI);
 
     FIOperandNum = getFrameIndexOperandNum(*II);
     TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
   }
+  return Scavenged[SI];
+}
 
-  Scavenged[SI].Restore = &*std::prev(UseMI);
+unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
+                                        MachineBasicBlock::iterator I,
+                                        int SPAdj) {
+  MachineInstr &MI = *I;
+  const MachineFunction &MF = *MI.getParent()->getParent();
+  // Consider all allocatable registers in the register class initially
+  BitVector Candidates = TRI->getAllocatableSet(MF, RC);
 
-  // Doing this here leads to infinite regress.
-  // Scavenged[SI].Reg = SReg;
+  // Exclude all the registers being used by the instruction.
+  for (const MachineOperand &MO : MI.operands()) {
+    if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
+        !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+      Candidates.reset(MO.getReg());
+  }
+
+  // Try to find a register that's unused if there is one, as then we won't
+  // have to spill.
+  BitVector Available = getRegsAvailable(RC);
+  Available &= Candidates;
+  if (Available.any())
+    Candidates = Available;
+
+  // Find the register whose use is furthest away.
+  MachineBasicBlock::iterator UseMI;
+  unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
+
+  // If we found an unused register there is no reason to spill it.
+  if (!isRegUsed(SReg)) {
+    DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
+    return SReg;
+  }
+
+  ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI);
+  Scavenged.Restore = &*std::prev(UseMI);
 
   DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) <<
         "\n");
 
   return SReg;
 }
+
+unsigned RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
+                                                 MachineBasicBlock::iterator To,
+                                                 bool RestoreAfter, int SPAdj) {
+  const MachineBasicBlock &MBB = *To->getParent();
+  const MachineFunction &MF = *MBB.getParent();
+  // Consider all allocatable registers in the register class initially
+  BitVector Candidates = TRI->getAllocatableSet(MF, &RC);
+
+  // Try to find a register that's unused if there is one, as then we won't
+  // have to spill.
+  BitVector Available = getRegsAvailable(&RC);
+
+  // Find the register whose use is furthest away.
+  MachineBasicBlock::iterator UseMI;
+  std::pair<unsigned, MachineBasicBlock::iterator> P =
+      findSurvivorBackwards(*TRI, MBBI, To, Available, Candidates);
+  unsigned Reg = P.first;
+  assert(Reg != 0 && "No register left to scavenge!");
+  // Found an available register?
+  if (!Available.test(Reg)) {
+    MachineBasicBlock::iterator ReloadAfter =
+      RestoreAfter ? std::next(MBBI) : MBBI;
+    MachineBasicBlock::iterator ReloadBefore = std::next(ReloadAfter);
+    DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n');
+    MachineBasicBlock::iterator SpillBefore = P.second;
+    ScavengedInfo &Scavenged = spill(Reg, RC, SPAdj, SpillBefore, ReloadBefore);
+    Scavenged.Restore = &*std::prev(SpillBefore);
+    addRegUnits(RegUnitsAvailable, Reg);
+    DEBUG(dbgs() << "Scavenged register with spill: " << PrintReg(Reg, TRI)
+          << " until " << *SpillBefore);
+  } else {
+    DEBUG(dbgs() << "Scavenged free register: " << PrintReg(Reg, TRI) << '\n');
+  }
+  return Reg;
+}

Modified: llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll?rev=279124&r1=279123&r2=279124&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll Thu Aug 18 14:47:59 2016
@@ -140,8 +140,8 @@ define void @stored_fi_to_global_2_small
 }
 
 ; GCN-LABEL: {{^}}stored_fi_to_global_huge_frame_offset:
-; GCN: s_add_i32 [[BASE_1_OFF_0:s[0-9]+]], 0, 0x3ffc
-; GCN: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}}
+; GCN-DAG: s_add_i32 [[BASE_1_OFF_0:s[0-9]+]], 0, 0x3ffc
+; GCN-DAG: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}}
 ; GCN: buffer_store_dword [[BASE_0]], v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen
 
 ; GCN: v_mov_b32_e32 [[V_BASE_1_OFF_0:v[0-9]+]], [[BASE_1_OFF_0]]

Modified: llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll?rev=279124&r1=279123&r2=279124&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll Thu Aug 18 14:47:59 2016
@@ -1,34 +1,62 @@
-; Check that register scavenging spill slot is close to $fp.
 ; RUN: llc -march=mipsel -O0 -relocation-model=pic < %s | FileCheck %s
+; Check that the register scavenging spill slot is close to $fp.
+target triple="mipsel--"
 
-; CHECK: sw ${{.*}}, 8($sp)
-; CHECK: lw ${{.*}}, 8($sp)
+ at var = external global i32
+ at ptrvar = external global i8*
 
-define i32 @main(i32 signext %argc, i8** %argv) #0 {
-entry:
-  %retval = alloca i32, align 4
-  %argc.addr = alloca i32, align 4
-  %argv.addr = alloca i8**, align 4
-  %v0 = alloca <16 x i8>, align 16
-  %.compoundliteral = alloca <16 x i8>, align 16
-  %v1 = alloca <16 x i8>, align 16
-  %.compoundliteral1 = alloca <16 x i8>, align 16
-  %unused_variable = alloca [16384 x i32], align 4
-  %result = alloca <16 x i8>, align 16
-  store i32 0, i32* %retval
-  store i32 %argc, i32* %argc.addr, align 4
-  store i8** %argv, i8*** %argv.addr, align 4
-  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %.compoundliteral
-  %0 = load <16 x i8>, <16 x i8>* %.compoundliteral
-  store <16 x i8> %0, <16 x i8>* %v0, align 16
-  store <16 x i8> zeroinitializer, <16 x i8>* %.compoundliteral1
-  %1 = load <16 x i8>, <16 x i8>* %.compoundliteral1
-  store <16 x i8> %1, <16 x i8>* %v1, align 16
-  %2 = load <16 x i8>, <16 x i8>* %v0, align 16
-  %3 = load <16 x i8>, <16 x i8>* %v1, align 16
-  %mul = mul <16 x i8> %2, %3
-  store <16 x i8> %mul, <16 x i8>* %result, align 16
-  ret i32 0
-}
+; CHECK-LABEL: func:
+define void @func() {
+  %space = alloca i32, align 4
+  %stackspace = alloca [16384 x i32], align 4
+
+  ; ensure stackspace is not optimized out
+  %stackspace_casted = bitcast [16384 x i32]* %stackspace to i8*
+  store volatile i8* %stackspace_casted, i8** @ptrvar
 
-attributes #0 = { noinline "no-frame-pointer-elim"="true" }
+  ; Load values to increase register pressure.
+  %v0 = load volatile i32, i32* @var
+  %v1 = load volatile i32, i32* @var
+  %v2 = load volatile i32, i32* @var
+  %v3 = load volatile i32, i32* @var
+  %v4 = load volatile i32, i32* @var
+  %v5 = load volatile i32, i32* @var
+  %v6 = load volatile i32, i32* @var
+  %v7 = load volatile i32, i32* @var
+  %v8 = load volatile i32, i32* @var
+  %v9 = load volatile i32, i32* @var
+  %v10 = load volatile i32, i32* @var
+  %v11 = load volatile i32, i32* @var
+  %v12 = load volatile i32, i32* @var
+  %v13 = load volatile i32, i32* @var
+  %v14 = load volatile i32, i32* @var
+  %v15 = load volatile i32, i32* @var
+  %v16 = load volatile i32, i32* @var
+
+  ; Computing a stack-relative value needs an additional register.
+  ; We should get an emergency spill/reload for this.
+  ; CHECK: sw ${{.*}}, 0($sp)
+  ; CHECK: lw ${{.*}}, 0($sp)
+  store volatile i32 %v0, i32* %space
+
+  ; store values so they are used.
+  store volatile i32 %v0, i32* @var
+  store volatile i32 %v1, i32* @var
+  store volatile i32 %v2, i32* @var
+  store volatile i32 %v3, i32* @var
+  store volatile i32 %v4, i32* @var
+  store volatile i32 %v5, i32* @var
+  store volatile i32 %v6, i32* @var
+  store volatile i32 %v7, i32* @var
+  store volatile i32 %v8, i32* @var
+  store volatile i32 %v9, i32* @var
+  store volatile i32 %v10, i32* @var
+  store volatile i32 %v11, i32* @var
+  store volatile i32 %v12, i32* @var
+  store volatile i32 %v13, i32* @var
+  store volatile i32 %v14, i32* @var
+  store volatile i32 %v15, i32* @var
+  store volatile i32 %v16, i32* @var
+
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll?rev=279124&r1=279123&r2=279124&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll Thu Aug 18 14:47:59 2016
@@ -25,8 +25,8 @@ entry:
 
 ; CHECK-DAG: li [[REG1:[0-9]+]], -128
 ; CHECK-DAG: neg [[REG2:[0-9]+]],
-; CHECK: and [[REG1]], [[REG2]], [[REG1]]
-; CHECK: stdux {{[0-9]+}}, 1, [[REG1]]
+; CHECK: and [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+; CHECK: stdux {{[0-9]+}}, 1, [[REG3]]
 
 ; CHECK: blr
 

Modified: llvm/trunk/test/CodeGen/Thumb/large-stack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb/large-stack.ll?rev=279124&r1=279123&r2=279124&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb/large-stack.ll (original)
+++ llvm/trunk/test/CodeGen/Thumb/large-stack.ll Thu Aug 18 14:47:59 2016
@@ -46,8 +46,8 @@ define i32 @test3() {
 ; CHECK-LABEL: test3:
 ; CHECK: ldr [[TEMP:r[0-7]]],
 ; CHECK: add sp, [[TEMP]]
-; CHECK: ldr [[TEMP]],
-; CHECK: add [[TEMP]], sp
+; CHECK: ldr [[TEMP2:r[0-7]]],
+; CHECK: add [[TEMP2]], sp
 ; EABI: ldr [[TEMP:r[0-7]]],
 ; EABI: add sp, [[TEMP]]
 ; IOS: subs r4, r7, #4



