[llvm-commits] [llvm] r127732 - in /llvm/trunk/test/CodeGen/X86: coalescer-commute2.ll fold-mul-lohi.ll scalar-min-max-fill-operand.ll sse-align-0.ll sse-align-3.ll sse-align-7.ll sse-commute.ll sse_reload_fold.ll stdarg.ll stride-nine-with-base-reg.ll stride-reuse.ll tailcallbyval64.ll

NAKAMURA Takumi geek4civic at gmail.com
Wed Mar 16 06:52:38 PDT 2011


Author: chapuni
Date: Wed Mar 16 08:52:38 2011
New Revision: 127732

URL: http://llvm.org/viewvc/llvm-project?rev=127732&view=rev
Log:
test/CodeGen/X86: FileCheck-ize and add an explicit -mtriple=x86_64-linux; these checks are not meaningful for the Win64 target.
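
The general rewrite pattern, shown here as an illustrative sketch taken from the
first diff below, replaces a grep/count pipeline with an explicit triple and
ordered FileCheck directives:

    ; Before: count matches with grep
    ; RUN: llc < %s -march=x86-64 | grep paddw | count 2
    ; RUN: llc < %s -march=x86-64 | not grep mov
    ; After: pin the triple and let FileCheck enforce order and count
    ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
    ; CHECK-NOT: mov
    ; CHECK:     paddw
    ; CHECK-NOT: mov
    ; CHECK:     paddw
    ; CHECK-NOT: paddw
    ; CHECK-NOT: mov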

Modified:
    llvm/trunk/test/CodeGen/X86/coalescer-commute2.ll
    llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll
    llvm/trunk/test/CodeGen/X86/scalar-min-max-fill-operand.ll
    llvm/trunk/test/CodeGen/X86/sse-align-0.ll
    llvm/trunk/test/CodeGen/X86/sse-align-3.ll
    llvm/trunk/test/CodeGen/X86/sse-align-7.ll
    llvm/trunk/test/CodeGen/X86/sse-commute.ll
    llvm/trunk/test/CodeGen/X86/sse_reload_fold.ll
    llvm/trunk/test/CodeGen/X86/stdarg.ll
    llvm/trunk/test/CodeGen/X86/stride-nine-with-base-reg.ll
    llvm/trunk/test/CodeGen/X86/stride-reuse.ll
    llvm/trunk/test/CodeGen/X86/tailcallbyval64.ll

Modified: llvm/trunk/test/CodeGen/X86/coalescer-commute2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalescer-commute2.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalescer-commute2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalescer-commute2.ll Wed Mar 16 08:52:38 2011
@@ -1,5 +1,10 @@
-; RUN: llc < %s -march=x86-64 | grep paddw | count 2
-; RUN: llc < %s -march=x86-64 | not grep mov
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT:     mov
+; CHECK:     paddw
+; CHECK-NOT:     mov
+; CHECK:     paddw
+; CHECK-NOT:     paddw
+; CHECK-NOT:     mov
 
 ; The 2-addr pass should ensure that identical code is produced for these functions
 ; no extra copy should be generated.

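A note on the CHECK block above, for readers less familiar with FileCheck:
CHECK directives must match in order, and a CHECK-NOT only forbids its pattern
in the region between the surrounding matches (or before the first match and
after the last one). The block is therefore equivalent to the old
"grep paddw | count 2" plus "not grep mov" pair: exactly two paddw
instructions and no mov anywhere. A hypothetical output that would satisfy it
(for illustration only, not taken from this test):

    foo:
            paddw   %xmm1, %xmm0
            ret
    bar:
            paddw   %xmm1, %xmm0
            ret
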
Modified: llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll Wed Mar 16 08:52:38 2011
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86            | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: lea
 
 @B = external global [1000 x i8], align 32
 @A = external global [1000 x i8], align 32

Modified: llvm/trunk/test/CodeGen/X86/scalar-min-max-fill-operand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/scalar-min-max-fill-operand.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/scalar-min-max-fill-operand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/scalar-min-max-fill-operand.ll Wed Mar 16 08:52:38 2011
@@ -1,6 +1,13 @@
-; RUN: llc < %s -march=x86-64 | grep min | count 1
-; RUN: llc < %s -march=x86-64 | grep max | count 1
-; RUN: llc < %s -march=x86-64 | grep mov | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT:     {{(min|max|mov)}}
+; CHECK:     mov
+; CHECK-NOT:     {{(min|max|mov)}}
+; CHECK:     min
+; CHECK-NOT:     {{(min|max|mov)}}
+; CHECK:     mov
+; CHECK-NOT:     {{(min|max|mov)}}
+; CHECK:     max
+; CHECK-NOT:     {{(min|max|mov)}}
 
 declare float @bar()
 

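The {{(min|max|mov)}} pattern above uses FileCheck's inline regular
expressions: anything between {{ and }} is treated as a regex rather than a
literal string, so a single CHECK-NOT line can exclude several mnemonics at
once. A minimal sketch of the same syntax (hypothetical directive, not part of
any of these tests); it matches either movss or movaps:

    ; CHECK: mov{{(ss|aps)}}
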
Modified: llvm/trunk/test/CodeGen/X86/sse-align-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-align-0.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-align-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-align-0.ll Wed Mar 16 08:52:38 2011
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | not grep mov
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT:     mov
 
 define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
   %t = load <4 x float>* %p

Modified: llvm/trunk/test/CodeGen/X86/sse-align-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-align-3.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-align-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-align-3.ll Wed Mar 16 08:52:38 2011
@@ -1,4 +1,9 @@
-; RUN: llc < %s -march=x86-64 | grep movap | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT:     movapd
+; CHECK:     movaps
+; CHECK-NOT:     movaps
+; CHECK:     movapd
+; CHECK-NOT:     movap
 
 define void @foo(<4 x float>* %p, <4 x float> %x) nounwind {
   store <4 x float> %x, <4 x float>* %p

Modified: llvm/trunk/test/CodeGen/X86/sse-align-7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-align-7.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-align-7.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-align-7.ll Wed Mar 16 08:52:38 2011
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86-64 | grep movaps | count 1
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK:     movaps
+; CHECK-NOT:     movaps
 
 define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
   store <2 x i64> %x, <2 x i64>* %p

Modified: llvm/trunk/test/CodeGen/X86/sse-commute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-commute.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-commute.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-commute.ll Wed Mar 16 08:52:38 2011
@@ -1,4 +1,4 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
 
 ; Commute the comparison to avoid a move.
 ; PR7500.

Modified: llvm/trunk/test/CodeGen/X86/sse_reload_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse_reload_fold.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse_reload_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse_reload_fold.ll Wed Mar 16 08:52:38 2011
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86-64 -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& \
-; RUN:   grep fail | count 1
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& FileCheck %s
+; CHECK: fail
+; CHECK-NOT: fail
 
 declare float @test_f(float %f)
 declare double @test_d(double %f)

Modified: llvm/trunk/test/CodeGen/X86/stdarg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stdarg.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stdarg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stdarg.ll Wed Mar 16 08:52:38 2011
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | grep {testb	\[%\]al, \[%\]al}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK: testb %al, %al
 
 %struct.__va_list_tag = type { i32, i32, i8*, i8* }
 

Modified: llvm/trunk/test/CodeGen/X86/stride-nine-with-base-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stride-nine-with-base-reg.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stride-nine-with-base-reg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stride-nine-with-base-reg.ll Wed Mar 16 08:52:38 2011
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86 -relocation-model=static | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux               | FileCheck %s
+; CHECK-NOT:     lea
 
 ; P should be sunk into the loop and folded into the address mode. There
 ; shouldn't be any lea instructions inside the loop.

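The "no lea" expectation reflects x86 addressing modes: once the base pointer
is sunk into the loop, the address arithmetic can be folded directly into the
memory operand instead of being materialized with lea. Roughly, as an
illustrative sketch (the symbol and registers are hypothetical, not the actual
output of this test):

    # folded: the base and scaled index live inside the memory operand
    movb    %al, B(%edx,%ecx)

    # unfolded: a separate lea materializes the address first
    leal    B(%edx,%ecx), %esi
    movb    %al, (%esi)
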
Modified: llvm/trunk/test/CodeGen/X86/stride-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stride-reuse.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stride-reuse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stride-reuse.ll Wed Mar 16 08:52:38 2011
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86            | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT:     lea
 
 @B = external global [1000 x float], align 32
 @A = external global [1000 x float], align 32

Modified: llvm/trunk/test/CodeGen/X86/tailcallbyval64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tailcallbyval64.ll?rev=127732&r1=127731&r2=127732&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tailcallbyval64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tailcallbyval64.ll Wed Mar 16 08:52:38 2011
@@ -1,15 +1,30 @@
-; RUN: llc < %s -march=x86-64  -tailcallopt  | grep TAILCALL
+; RUN: llc < %s -mtriple=x86_64-linux  -tailcallopt  | FileCheck %s
+
+; FIXME: Win64 does not support byval.
+
+; Expect the entry point.
+; CHECK: tailcaller:
+
 ; Expect 2 rep;movs because of tail call byval lowering.
-; RUN: llc < %s -march=x86-64  -tailcallopt  | grep rep | wc -l | grep 2
+; CHECK: rep;
+; CHECK: rep;
+
 ; A sequence of copyto/copyfrom virtual registers is used to deal with byval
 ; lowering appearing after moving arguments to registers. The following two
 ; checks verify that the register allocator changes those sequences to direct
 ; moves to argument register where it can (for registers that are not used in 
 ; byval lowering - not rsi, not rdi, not rcx).
 ; Expect argument 4 to be moved directly to register edx.
-; RUN: llc < %s -march=x86-64  -tailcallopt  | grep movl | grep {7} | grep edx
+; CHECK: movl $7, %edx
+
 ; Expect argument 6 to be moved directly to register r8.
-; RUN: llc < %s -march=x86-64  -tailcallopt  | grep movl | grep {17} | grep r8
+; CHECK: movl $17, %r8d
+
+; Expect a jmp to @tailcallee rather than a call.
+; CHECK: jmp tailcallee
+
+; Expect the trailer.
+; CHECK: .size tailcaller
 
 %struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
                    i64, i64, i64, i64, i64, i64, i64, i64,
@@ -25,5 +40,3 @@
         %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* %a byval, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
         ret i64 %tmp4
 }
-
-

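To reproduce any of these checks by hand, the RUN line can be expanded
manually; for this test that is roughly (assuming llc and FileCheck from a
build are on PATH, and a path relative to an LLVM checkout):

    $ llc < llvm/test/CodeGen/X86/tailcallbyval64.ll -mtriple=x86_64-linux -tailcallopt \
        | FileCheck llvm/test/CodeGen/X86/tailcallbyval64.ll

Alternatively, the lit driver (utils/lit/lit.py in the source tree) performs
the %s substitution and runs the RUN lines automatically.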