[llvm-commits] [llvm] r59559 - in /llvm/trunk: lib/CodeGen/RegisterScavenging.cpp test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll

Evan Cheng evan.cheng at apple.com
Tue Nov 18 14:28:38 PST 2008


Author: evancheng
Date: Tue Nov 18 16:28:38 2008
New Revision: 59559

URL: http://llvm.org/viewvc/llvm-project?rev=59559&view=rev
Log:
Register scavenger should process early clobber defs first. A dead early clobber def should not interfere with a normal def that happens one slot later.

Added:
    llvm/trunk/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
Modified:
    llvm/trunk/lib/CodeGen/RegisterScavenging.cpp

Modified: llvm/trunk/lib/CodeGen/RegisterScavenging.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegisterScavenging.cpp?rev=59559&r1=59558&r2=59559&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/RegisterScavenging.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegisterScavenging.cpp Tue Nov 18 16:28:38 2008
@@ -190,47 +190,61 @@
   if (TID.isTerminator())
     restoreScavengedReg();
 
-  // Process uses first.
-  BitVector ChangedRegs(NumPhysRegs);
+  bool IsImpDef = MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF;
+
+  // Separate register operands into 3 classes: uses, defs, earlyclobbers.
+  SmallVector<const MachineOperand*, 4> UseMOs;
+  SmallVector<const MachineOperand*, 4> DefMOs;
+  SmallVector<const MachineOperand*, 4> EarlyClobberMOs;
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
-    if (!MO.isReg() || !MO.isUse())
+    if (!MO.isReg() || MO.getReg() == 0)
       continue;
+    if (MO.isUse())
+      UseMOs.push_back(&MO);
+    else if (MO.isEarlyClobber())
+      EarlyClobberMOs.push_back(&MO);
+    else
+      DefMOs.push_back(&MO);
+  }
 
+  // Process uses first.
+  BitVector UseRegs(NumPhysRegs);
+  for (unsigned i = 0, e = UseMOs.size(); i != e; ++i) {
+    const MachineOperand &MO = *UseMOs[i];
     unsigned Reg = MO.getReg();
-    if (Reg == 0) continue;
 
     if (!isUsed(Reg)) {
       // Register has been scavenged. Restore it!
-      if (Reg != ScavengedReg)
-        assert(false && "Using an undefined register!");
-      else
+      if (Reg == ScavengedReg)
         restoreScavengedReg();
+      else
+        assert(false && "Using an undefined register!");
     }
 
     if (MO.isKill() && !isReserved(Reg)) {
-      ChangedRegs.set(Reg);
+      UseRegs.set(Reg);
 
-      // Mark sub-registers as changed if they aren't defined in the same
-      // instruction.
+      // Mark sub-registers as used.
       for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
            unsigned SubReg = *SubRegs; ++SubRegs)
-        ChangedRegs.set(SubReg);
+        UseRegs.set(SubReg);
     }
   }
 
   // Change states of all registers after all the uses are processed to guard
   // against multiple uses.
-  setUnused(ChangedRegs);
-
-  // Process defs.
-  bool IsImpDef = MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF;
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
-
-    if (!MO.isReg() || !MO.isDef())
-      continue;
+  setUnused(UseRegs);
 
+  // Process early clobber defs, then process defs. We can have an early
+  // clobber that is dead; it should not conflict with a def that happens one
+  // "slot" (see InstrSlots in LiveIntervalAnalysis.h) later.
+  unsigned NumECs = EarlyClobberMOs.size();
+  unsigned NumDefs = DefMOs.size();
+
+  for (unsigned i = 0, e = NumECs + NumDefs; i != e; ++i) {
+    const MachineOperand &MO = (i < NumECs)
+      ? *EarlyClobberMOs[i] : *DefMOs[i-NumECs];
     unsigned Reg = MO.getReg();
 
     // If it's dead upon def, then it is now free.
@@ -282,7 +296,7 @@
   }
 
   // Process uses.
-  BitVector ChangedRegs(NumPhysRegs);
+  BitVector UseRegs(NumPhysRegs);
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg() || !MO.isUse())
@@ -291,14 +305,14 @@
     if (Reg == 0)
       continue;
     assert(isUnused(Reg) || isReserved(Reg));
-    ChangedRegs.set(Reg);
+    UseRegs.set(Reg);
 
     // Set the sub-registers as "used".
     for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
          unsigned SubReg = *SubRegs; ++SubRegs)
-      ChangedRegs.set(SubReg);
+      UseRegs.set(SubReg);
   }
-  setUsed(ChangedRegs);
+  setUsed(UseRegs);
 }
 
 void RegScavenger::getRegsUsed(BitVector &used, bool includeReserved) {

Added: llvm/trunk/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll?rev=59559&view=auto

==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll Tue Nov 18 16:28:38 2008
@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v6,+vfp2
+
+define hidden i64 @__muldi3(i64 %u, i64 %v) nounwind {
+entry:
+	%0 = trunc i64 %u to i32		; <i32> [#uses=1]
+	%asmtmp = tail call { i32, i32, i32, i32, i32 } asm "@ Inlined umul_ppmm\0A\09mov\09$2, $5, lsr #16\0A\09mov\09$0, $6, lsr #16\0A\09bic\09$3, $5, $2, lsl #16\0A\09bic\09$4, $6, $0, lsl #16\0A\09mul\09$1, $3, $4\0A\09mul\09$4, $2, $4\0A\09mul\09$3, $0, $3\0A\09mul\09$0, $2, $0\0A\09adds\09$3, $4, $3\0A\09addcs\09$0, $0, #65536\0A\09adds\09$1, $1, $3, lsl #16\0A\09adc\09$0, $0, $3, lsr #16", "=&r,=r,=&r,=&r,=r,r,r,~{cc}"(i32 %0, i32 0) nounwind		; <{ i32, i32, i32, i32, i32 }> [#uses=1]
+	%asmresult1 = extractvalue { i32, i32, i32, i32, i32 } %asmtmp, 1		; <i32> [#uses=1]
+	%asmresult116 = zext i32 %asmresult1 to i64		; <i64> [#uses=1]
+	%asmresult116.ins = or i64 0, %asmresult116		; <i64> [#uses=1]
+	%1 = lshr i64 %v, 32		; <i64> [#uses=1]
+	%2 = mul i64 %1, %u		; <i64> [#uses=1]
+	%3 = add i64 %2, 0		; <i64> [#uses=1]
+	%4 = shl i64 %3, 32		; <i64> [#uses=1]
+	%5 = add i64 %asmresult116.ins, %4		; <i64> [#uses=1]
+	ret i64 %5
+}





More information about the llvm-commits mailing list