[llvm-commits] [llvm] r147959 - in /llvm/trunk/test/CodeGen/X86: segmented-stacks-dynamic.ll segmented-stacks.ll

Rafael Espindola rafael.espindola at gmail.com
Wed Jan 11 10:51:03 PST 2012


Author: rafael
Date: Wed Jan 11 12:51:03 2012
New Revision: 147959

URL: http://llvm.org/viewvc/llvm-project?rev=147959&view=rev
Log:
Split segmented stacks tests into tests for static- and dynamic-size frames.
Patch by Brian Anderson.
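
The split reflects the two cases the backend has to handle: a fixed-size frame only needs the __morestack prologue check with a constant frame size, while a frame whose size depends on a runtime value additionally routes the variable-sized allocation through __morestack_allocate_stack_space, which is what the CHECK lines below verify. As a minimal illustrative sketch (mirroring the tests, not part of the committed files), the two frame shapes look like this in IR:

declare void @dummy_use(i32*, i32)   ; keeps the allocas from being optimized away

define void @static_frame() {
  %buf = alloca i32, i32 10          ; size known at compile time -> prologue check only
  call void @dummy_use(i32* %buf, i32 10)
  ret void
}

define void @dynamic_frame(i32 %n) {
  %buf = alloca i32, i32 %n          ; size depends on %n -> may also call __morestack_allocate_stack_space
  call void @dummy_use(i32* %buf, i32 %n)
  ret void
}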

Added:
    llvm/trunk/test/CodeGen/X86/segmented-stacks-dynamic.ll
Modified:
    llvm/trunk/test/CodeGen/X86/segmented-stacks.ll

Added: llvm/trunk/test/CodeGen/X86/segmented-stacks-dynamic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/segmented-stacks-dynamic.ll?rev=147959&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/segmented-stacks-dynamic.ll (added)
+++ llvm/trunk/test/CodeGen/X86/segmented-stacks-dynamic.ll Wed Jan 11 12:51:03 2012
@@ -0,0 +1,64 @@
+; RUN: llc < %s -mtriple=i686-linux -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-linux  -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-linux -segmented-stacks -filetype=obj
+; RUN: llc < %s -mtriple=x86_64-linux -segmented-stacks -filetype=obj
+
+; Just to prevent the alloca from being optimized away
+declare void @dummy_use(i32*, i32)
+
+define i32 @test_basic(i32 %l) {
+        %mem = alloca i32, i32 %l
+        call void @dummy_use (i32* %mem, i32 %l)
+        %terminate = icmp eq i32 %l, 0
+        br i1 %terminate, label %true, label %false
+
+true:
+        ret i32 0
+
+false:
+        %newlen = sub i32 %l, 1
+        %retvalue = call i32 @test_basic(i32 %newlen)
+        ret i32 %retvalue
+
+; X32:      test_basic:
+
+; X32:      cmpl %gs:48, %esp
+; X32-NEXT: ja      .LBB0_2
+
+; X32:      pushl $4
+; X32-NEXT: pushl $12
+; X32-NEXT: calll __morestack
+; X32-NEXT: ret
+
+; X32:      movl %esp, %eax
+; X32-NEXT: subl %ecx, %eax
+; X32-NEXT: cmpl %eax, %gs:48
+
+; X32:      movl %eax, %esp
+
+; X32:      subl $12, %esp
+; X32-NEXT: pushl %ecx
+; X32-NEXT: calll __morestack_allocate_stack_space
+; X32-NEXT: addl $16, %esp
+
+; X64:      test_basic:
+
+; X64:      cmpq %fs:112, %rsp
+; X64-NEXT: ja      .LBB0_2
+
+; X64:      movabsq $24, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+
+; X64:      movq %rsp, %rdi
+; X64-NEXT: subq %rax, %rdi
+; X64-NEXT: cmpq %rdi, %fs:112
+
+; X64:      movq %rdi, %rsp
+
+; X64:      movq %rax, %rdi
+; X64-NEXT: callq __morestack_allocate_stack_space
+; X64-NEXT: movq %rax, %rdi
+
+}

Modified: llvm/trunk/test/CodeGen/X86/segmented-stacks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/segmented-stacks.ll?rev=147959&r1=147958&r2=147959&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/segmented-stacks.ll (original)
+++ llvm/trunk/test/CodeGen/X86/segmented-stacks.ll Wed Jan 11 12:51:03 2012
@@ -8,61 +8,31 @@
 ; Just to prevent the alloca from being optimized away
 declare void @dummy_use(i32*, i32)
 
-define i32 @test_basic(i32 %l) {
-        %mem = alloca i32, i32 %l
-        call void @dummy_use (i32* %mem, i32 %l)
-        %terminate = icmp eq i32 %l, 0
-        br i1 %terminate, label %true, label %false
-
-true:
-        ret i32 0
-
-false:
-        %newlen = sub i32 %l, 1
-        %retvalue = call i32 @test_basic(i32 %newlen)
-        ret i32 %retvalue
+define void @test_basic() {
+        %mem = alloca i32, i32 10
+        call void @dummy_use (i32* %mem, i32 10)
+	ret void
 
 ; X32:      test_basic:
 
 ; X32:      cmpl %gs:48, %esp
 ; X32-NEXT: ja      .LBB0_2
 
-; X32:      pushl $4
-; X32-NEXT: pushl $12
+; X32:      pushl $0
+; X32-NEXT: pushl $60
 ; X32-NEXT: calll __morestack
 ; X32-NEXT: ret 
 
-; X32:      movl %esp, %eax
-; X32-NEXT: subl %ecx, %eax
-; X32-NEXT: cmpl %eax, %gs:48
-
-; X32:      movl %eax, %esp
-
-; X32:      subl $12, %esp
-; X32-NEXT: pushl %ecx
-; X32-NEXT: calll __morestack_allocate_stack_space
-; X32-NEXT: addl $16, %esp
-
 ; X64:      test_basic:
 
 ; X64:      cmpq %fs:112, %rsp
 ; X64-NEXT: ja      .LBB0_2
 
-; X64:      movabsq $24, %r10
+; X64:      movabsq $40, %r10
 ; X64-NEXT: movabsq $0, %r11
 ; X64-NEXT: callq __morestack
 ; X64-NEXT: ret
 
-; X64:      movq %rsp, %rdi
-; X64-NEXT: subq %rax, %rdi
-; X64-NEXT: cmpq %rdi, %fs:112
-
-; X64:      movq %rdi, %rsp
-
-; X64:      movq %rax, %rdi
-; X64-NEXT: callq __morestack_allocate_stack_space
-; X64-NEXT: movq %rax, %rdi
-
 }
 
 define i32 @test_nested(i32 * nest %closure, i32 %other) {
