[llvm-commits] [llvm] r145766 - in /llvm/trunk: lib/Target/X86/X86FrameLowering.cpp test/CodeGen/X86/segmented-stacks.ll

Sanjoy Das sanjoy at playingwithpointers.com
Sat Dec 3 01:32:07 PST 2011


Author: sanjoy
Date: Sat Dec  3 03:32:07 2011
New Revision: 145766

URL: http://llvm.org/viewvc/llvm-project?rev=145766&view=rev
Log:
Check for stack space more intelligently.

libgcc sets the stack limit field in the TCB to 256 bytes above the actual
allocated stack limit.  This means that if the function's stack frame needs
less than 256 bytes, we can just compare the stack pointer with the
stack limit.  This should result in fewer calls to __morestack.

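To make the reasoning concrete, here is a minimal standalone sketch of the
invariant the new check relies on, assuming the libgcc convention described
above (the limit stored in the TCB sits 256 bytes above the real limit); the
variable names are illustrative only, not LLVM code:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t RealLimit = 0x1000;           // hypothetical bottom of the stack
    const uint64_t TcbLimit  = RealLimit + 256;  // value libgcc stores in the TCB
    const uint64_t StackSize = 24;               // a frame smaller than 256 bytes

    // The cheap check the prologue can now emit: compare SP itself against
    // the stored limit, with no LEA to pre-subtract the frame size.
    uint64_t SP = TcbLimit;                      // worst case that still passes
    assert(SP >= TcbLimit);

    // Because StackSize < 256, passing the cheap check guarantees the whole
    // frame still fits above the real limit, so __morestack is not needed.
    assert(SP - StackSize >= RealLimit);
    return 0;
  }
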
Modified:
    llvm/trunk/lib/Target/X86/X86FrameLowering.cpp
    llvm/trunk/test/CodeGen/X86/segmented-stacks.ll

Modified: llvm/trunk/lib/Target/X86/X86FrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FrameLowering.cpp?rev=145766&r1=145765&r2=145766&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FrameLowering.cpp Sat Dec  3 03:32:07 2011
@@ -1306,6 +1306,10 @@
   }
 }
 
+// The stack limit in the TCB is set to this many bytes above the actual stack
+// limit.
+static const uint64_t kSplitStackAvailable = 256;
+
 void
 X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
   MachineBasicBlock &prologueMBB = MF.front();
@@ -1360,16 +1364,24 @@
     TlsReg = X86::FS;
     TlsOffset = 0x70;
 
-    BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
-      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+    if (StackSize < kSplitStackAvailable)
+      ScratchReg = X86::RSP;
+    else
+      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
+        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
     BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
       .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
   } else {
     TlsReg = X86::GS;
     TlsOffset = 0x30;
 
-    BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
-      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+    if (StackSize < kSplitStackAvailable)
+      ScratchReg = X86::ESP;
+    else
+      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
+        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
     BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
       .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
   }

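For reference, a small model of the two check sequences the changed code can
now emit, using the TLS offsets from the diff (%fs:0x70 on x86-64, %gs:0x30
on x86-32); this is only a sketch with made-up helper names, not the LLVM
MachineInstr-building API used above:

  #include <cstdint>
  #include <cstdio>

  static const uint64_t kSplitStackAvailable = 256;

  // Prints the comparison the prologue check block would perform for a frame
  // of StackSize bytes.
  static void describeStackCheck(uint64_t StackSize, bool Is64Bit) {
    const char *SP  = Is64Bit ? "%rsp" : "%esp";
    const char *Tls = Is64Bit ? "%fs:0x70" : "%gs:0x30";
    if (StackSize < kSplitStackAvailable) {
      // Small frame: the 256-byte slack in the stored limit covers it, so the
      // stack pointer is compared directly and the LEA is elided.
      std::printf("cmp %s, %s\n", Tls, SP);
    } else {
      // Large frame: materialize SP - StackSize in a scratch register first
      // (the LEA in the real code), then compare that against the limit.
      std::printf("lea -%llu(%s), scratch\n",
                  (unsigned long long)StackSize, SP);
      std::printf("cmp %s, scratch\n", Tls);
    }
  }

  int main() {
    describeStackCheck(24, /*Is64Bit=*/true);     // elides the LEA
    describeStackCheck(40008, /*Is64Bit=*/true);  // keeps the LEA
    return 0;
  }
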
Modified: llvm/trunk/test/CodeGen/X86/segmented-stacks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/segmented-stacks.ll?rev=145766&r1=145765&r2=145766&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/segmented-stacks.ll (original)
+++ llvm/trunk/test/CodeGen/X86/segmented-stacks.ll Sat Dec  3 03:32:07 2011
@@ -20,8 +20,7 @@
 
 ; X32:      test_basic:
 
-; X32:      leal -12(%esp), %ecx
-; X32-NEXT: cmpl %gs:48, %ecx
+; X32:      cmpl %gs:48, %esp
 
 ; X32:      pushl $4
 ; X32-NEXT: pushl $12
@@ -41,8 +40,7 @@
 
 ; X64:      test_basic:
 
-; X64:      leaq -24(%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64:      cmpq %fs:112, %rsp
 
 ; X64:      movabsq $24, %r10
 ; X64-NEXT: movabsq $0, %r11
@@ -66,17 +64,14 @@
        %result = add i32 %other, %addend
        ret i32 %result
 
-; X32:      leal (%esp), %edx
-; X32-NEXT: cmpl %gs:48, %edx
-
+; X32:      cmpl %gs:48, %esp
 
 ; X32:      pushl $4
 ; X32-NEXT: pushl $0
 ; X32-NEXT: calll __morestack
 ; X32-NEXT: ret
 
-; X64:      leaq (%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64:      cmpq %fs:112, %rsp
 
 ; X64:      movq %r10, %rax
 ; X64-NEXT: movabsq $0, %r10
@@ -86,3 +81,26 @@
 ; X64-NEXT: movq %rax, %r10
 
 }
+
+define void @test_large() {
+        %mem = alloca i32, i32 10000
+        call void @dummy_use (i32* %mem, i32 0)
+        ret void
+
+; X32:      leal -40012(%esp), %ecx
+; X32-NEXT: cmpl %gs:48, %ecx
+
+; X32:      pushl $0
+; X32-NEXT: pushl $40012
+; X32-NEXT: calll __morestack
+; X32-NEXT: ret
+
+; X64:      leaq -40008(%rsp), %r11
+; X64-NEXT: cmpq %fs:112, %r11
+
+; X64:      movabsq $40008, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+
+}




