[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelLowering.cpp X86ISelLowering.h X86InstrInfo.td X86RegisterInfo.cpp

Anton Korobeynikov asl at math.spbu.ru
Tue Apr 17 02:20:35 PDT 2007



Changes in directory llvm/lib/Target/X86:

X86ISelLowering.cpp updated: 1.390 -> 1.391
X86ISelLowering.h updated: 1.97 -> 1.98
X86InstrInfo.td updated: 1.301 -> 1.302
X86RegisterInfo.cpp updated: 1.209 -> 1.210
---
Log message:

Implemented correct stack probing on mingw/cygwin for dynamic allocas.
Also fixed the static case when EAX is live-in. This fixes PR331: http://llvm.org/PR331

PS: Why do we still not have push/pop instructions? :)
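
For context, a dynamic alloca is a stack allocation whose size is only known
at run time. A minimal sketch of code that now takes the custom lowering path
on mingw/cygwin (illustrative only; consume() is a hypothetical helper that
keeps the buffer alive):

  #include <cstring>

  extern "C" void consume(char *buf, unsigned len);  // hypothetical helper

  void use_dynamic_buffer(unsigned n) {
    // __builtin_alloca of a run-time size becomes an i32 DYNAMIC_STACKALLOC
    // node; with this patch it is custom-lowered into a call to _alloca with
    // the size passed in EAX, so each 4K page of the new stack area gets
    // touched in order.
    char *buf = static_cast<char *>(__builtin_alloca(n));
    std::memset(buf, 0, n);
    consume(buf, n);
  }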


---
Diffs of the changes:  (+68 -5)

 X86ISelLowering.cpp |   36 +++++++++++++++++++++++++++++++++++-
 X86ISelLowering.h   |    1 +
 X86InstrInfo.td     |    3 +++
 X86RegisterInfo.cpp |   33 +++++++++++++++++++++++++++++----
 4 files changed, 68 insertions(+), 5 deletions(-)


Index: llvm/lib/Target/X86/X86ISelLowering.cpp
diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.390 llvm/lib/Target/X86/X86ISelLowering.cpp:1.391
--- llvm/lib/Target/X86/X86ISelLowering.cpp:1.390	Mon Apr 16 13:10:23 2007
+++ llvm/lib/Target/X86/X86ISelLowering.cpp	Tue Apr 17 04:20:00 2007
@@ -237,7 +237,10 @@
   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
   if (Subtarget->is64Bit())
     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Expand);
+  if (Subtarget->isTargetCygMing())
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+  else
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
 
   if (X86ScalarSSE) {
     // Set up the FP register classes.
@@ -3401,6 +3404,36 @@
     }
 }
 
+SDOperand X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
+                                                     SelectionDAG &DAG) {
+  // Get the inputs.
+  SDOperand Chain = Op.getOperand(0);
+  SDOperand Size  = Op.getOperand(1);
+  // FIXME: Ensure alignment here
+
+  TargetLowering::ArgListTy Args; 
+  TargetLowering::ArgListEntry Entry;
+  MVT::ValueType IntPtr = getPointerTy();
+  MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
+  const Type *IntPtrTy = getTargetData()->getIntPtrType();
+  
+  Entry.Node    = Size;
+  Entry.Ty      = IntPtrTy;
+  Entry.isInReg = true; // Should pass in EAX
+  Args.push_back(Entry);
+  std::pair<SDOperand, SDOperand> CallResult =
+    LowerCallTo(Chain, IntPtrTy, false, false, CallingConv::C, false,
+                DAG.getExternalSymbol("_alloca", IntPtr), Args, DAG);
+
+  SDOperand SP = DAG.getCopyFromReg(CallResult.second, X86StackPtr, SPTy);
+  
+  std::vector<MVT::ValueType> Tys;
+  Tys.push_back(SPTy);
+  Tys.push_back(MVT::Other);
+  SDOperand Ops[2] = { SP, CallResult.second };
+  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2);
+}
+
 SDOperand
 X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
   MachineFunction &MF = DAG.getMachineFunction();
@@ -4002,6 +4035,7 @@
   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
+  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
   }
   return SDOperand();
 }
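
For reference, _alloca itself is provided by the mingw/cygwin runtime and is
not part of this patch. Conceptually it does something like the sketch below
(illustrative only, not the real implementation); it also adjusts ESP by the
requested amount, which is why the lowering above reads ESP right after the
call returns:

  // Touch the new stack area one page at a time so the OS guard pages are
  // committed in sequence before the stack pointer moves past them.
  static const unsigned kPageSize = 4096;

  void probe_stack(volatile char *sp, unsigned bytes) {
    while (bytes >= kPageSize) {
      sp -= kPageSize;
      *sp = 0;            // fault in this page's guard page
      bytes -= kPageSize;
    }
  }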


Index: llvm/lib/Target/X86/X86ISelLowering.h
diff -u llvm/lib/Target/X86/X86ISelLowering.h:1.97 llvm/lib/Target/X86/X86ISelLowering.h:1.98
--- llvm/lib/Target/X86/X86ISelLowering.h:1.97	Mon Apr  9 18:31:19 2007
+++ llvm/lib/Target/X86/X86ISelLowering.h	Tue Apr 17 04:20:00 2007
@@ -401,6 +401,7 @@
     SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG);
+    SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG);


Index: llvm/lib/Target/X86/X86InstrInfo.td
diff -u llvm/lib/Target/X86/X86InstrInfo.td:1.301 llvm/lib/Target/X86/X86InstrInfo.td:1.302
--- llvm/lib/Target/X86/X86InstrInfo.td:1.301	Tue Apr 10 17:10:25 2007
+++ llvm/lib/Target/X86/X86InstrInfo.td	Tue Apr 17 04:20:00 2007
@@ -477,6 +477,9 @@
 def POP32r   : I<0x58, AddRegFrm,
                  (ops GR32:$reg), "pop{l} $reg", []>, Imp<[ESP],[ESP]>;
 
+def PUSH32r  : I<0x50, AddRegFrm,
+                 (ops GR32:$reg), "push{l} $reg", []>, Imp<[ESP],[ESP]>;
+
 def MovePCtoStack : I<0, Pseudo, (ops piclabel:$label),
                       "call $label", []>;
 


Index: llvm/lib/Target/X86/X86RegisterInfo.cpp
diff -u llvm/lib/Target/X86/X86RegisterInfo.cpp:1.209 llvm/lib/Target/X86/X86RegisterInfo.cpp:1.210
--- llvm/lib/Target/X86/X86RegisterInfo.cpp:1.209	Tue Apr  3 01:18:31 2007
+++ llvm/lib/Target/X86/X86RegisterInfo.cpp	Tue Apr 17 04:20:00 2007
@@ -1039,14 +1039,39 @@
 
   if (NumBytes) {   // adjust stack pointer: ESP -= numbytes
     if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
+      // Check whether EAX is live-in for this function
+      bool isEAXAlive = false;
+      for (MachineFunction::livein_iterator II = MF.livein_begin(),
+             EE = MF.livein_end(); (II != EE) && !isEAXAlive; ++II) {
+        unsigned Reg = II->first;
+        isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
+                      Reg == X86::AH || Reg == X86::AL);
+      }
+
       // Function prologue calls _alloca to probe the stack when allocating  
       // more than 4k bytes in one go. Touching the stack at 4K increments is  
       // necessary to ensure that the guard pages used by the OS virtual memory
       // manager are allocated in correct sequence.
-      MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
-      MBB.insert(MBBI, MI);
-      MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
-      MBB.insert(MBBI, MI);
+      if (!isEAXAlive) {
+        MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
+        MBB.insert(MBBI, MI);
+        MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
+        MBB.insert(MBBI, MI);
+      } else {
+        // Save EAX
+        MI = BuildMI(TII.get(X86::PUSH32r), X86::EAX);
+        MBB.insert(MBBI, MI);
+        // Allocate NumBytes-4 bytes on the stack; the 4 bytes already
+        // pushed for EAX complete the NumBytes adjustment.
+        MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
+        MBB.insert(MBBI, MI);
+        MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
+        MBB.insert(MBBI, MI);
+        // Restore EAX
+        MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm), X86::EAX),
+                          StackPtr, NumBytes-4);
+        MBB.insert(MBBI, MI);
+      }
     } else {
       unsigned Opc = (NumBytes < 128) ?
         (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
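
For illustration, a hypothetical function that hits the new "EAX is live-in"
path (not taken from the patch): with GCC's 32-bit regparm convention the
first integer argument arrives in EAX, and the large fixed-size local pushes
the frame over the 4096-byte probe threshold:

  extern "C" void fill(char *buf, int len);  // hypothetical helper

  // 'len' arrives in EAX, so EAX is live-in on entry. The prologue must load
  // the frame size into EAX for the _alloca probe, so it first spills EAX
  // with PUSH32r and reloads it from the frame afterwards, exactly as the
  // X86RegisterInfo change above does.
  extern "C" __attribute__((regparm(1))) void big_frame(int len) {
    char buf[8192];
    fill(buf, len);
  }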





