[llvm-commits] [llvm] r127733 - in /llvm/trunk/test/CodeGen/X86: apm.ll h-register-store.ll h-registers-0.ll peep-vector-extract-concat.ll pmulld.ll v2f32.ll widen_load-0.ll

NAKAMURA Takumi geek4civic at gmail.com
Wed Mar 16 06:52:51 PDT 2011


Author: chapuni
Date: Wed Mar 16 08:52:51 2011
New Revision: 127733

URL: http://llvm.org/viewvc/llvm-project?rev=127733&view=rev
Log:
test/CodeGen/X86: Add a pattern for Win64.
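
The pattern being added to each test below is a second RUN line with an
explicit Win64 triple and its own FileCheck prefix, since the same IR is
lowered to different argument registers per ABI. A minimal sketch of the
shape, using a hypothetical test that is not part of this commit (exact
codegen may differ by LLVM revision):

  ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
  ; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
  ; SysV passes the first two arguments in %rdi/%esi, Win64 in %rcx/%edx.
  ; CHECK: movl %esi, (%rdi)
  ; WIN64: movl %edx, (%rcx)
  define void @store_arg(i32* %p, i32 %v) nounwind {
    store i32 %v, i32* %p
    ret void
  }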

Modified:
    llvm/trunk/test/CodeGen/X86/apm.ll
    llvm/trunk/test/CodeGen/X86/h-register-store.ll
    llvm/trunk/test/CodeGen/X86/h-registers-0.ll
    llvm/trunk/test/CodeGen/X86/peep-vector-extract-concat.ll
    llvm/trunk/test/CodeGen/X86/pmulld.ll
    llvm/trunk/test/CodeGen/X86/v2f32.ll
    llvm/trunk/test/CodeGen/X86/widen_load-0.ll

Modified: llvm/trunk/test/CodeGen/X86/apm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/apm.ll?rev=127733&r1=127732&r2=127733&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/apm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/apm.ll Wed Mar 16 08:52:51 2011
@@ -1,10 +1,16 @@
-; RUN: llc < %s -o - -march=x86-64 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
 ; PR8573
 
 ; CHECK: foo:
 ; CHECK: leaq    (%rdi), %rax
 ; CHECK-NEXT: movl    %esi, %ecx
 ; CHECK-NEXT: monitor
+; WIN64: foo:
+; WIN64:      leaq    (%rcx), %rax
+; WIN64-NEXT: movl    %edx, %ecx
+; WIN64-NEXT: movl    %r8d, %edx
+; WIN64-NEXT: monitor
 define void @foo(i8* %P, i32 %E, i32 %H) nounwind {
 entry:
   tail call void @llvm.x86.sse3.monitor(i8* %P, i32 %E, i32 %H)
@@ -17,6 +23,9 @@
 ; CHECK: movl    %edi, %ecx
 ; CHECK-NEXT: movl    %esi, %eax
 ; CHECK-NEXT: mwait
+; WIN64: bar:
+; WIN64:      movl    %edx, %eax
+; WIN64-NEXT: mwait
 define void @bar(i32 %E, i32 %H) nounwind {
 entry:
   tail call void @llvm.x86.sse3.mwait(i32 %E, i32 %H)
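
The WIN64 checks above follow from the Microsoft x64 calling convention:
the first four integer/pointer arguments arrive in %rcx, %rdx, %r8, %r9,
versus %rdi, %rsi, %rdx, %rcx, %r8, %r9 under SysV, so monitor's fixed
%rax/%ecx/%edx operands are filled from different incoming registers on
each target. A hypothetical sketch of the mapping (not part of this
commit):

  define i32 @third_arg(i32 %a, i32 %b, i32 %c) nounwind {
    ; SysV: %a/%b/%c arrive in %edi/%esi/%edx, so this returns via
    ; "movl %edx, %eax"; Win64: they arrive in %ecx/%edx/%r8d, so the
    ; return copy is "movl %r8d, %eax".
    ret i32 %c
  }
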

Modified: llvm/trunk/test/CodeGen/X86/h-register-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/h-register-store.ll?rev=127733&r1=127732&r2=127733&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/h-register-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/h-register-store.ll Wed Mar 16 08:52:51 2011
@@ -1,9 +1,29 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep mov %t | count 6
-; RUN: grep {movb	%ah, (%rsi)} %t | count 3
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep mov %t | count 3
-; RUN: grep {movb	%ah, (%e} %t | count 3
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
+; X64:      mov
+; X64-NEXT: movb %ah, (%rsi)
+; X64:      mov
+; X64-NEXT: movb %ah, (%rsi)
+; X64:      mov
+; X64-NEXT: movb %ah, (%rsi)
+; X64-NOT:      mov
+
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=W64
+; W64-NOT:      mov
+; W64:      movb %ch, (%rdx)
+; W64-NOT:      mov
+; W64:      movb %ch, (%rdx)
+; W64-NOT:      mov
+; W64:      movb %ch, (%rdx)
+; W64-NOT:      mov
+
+; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
+; X32-NOT:      mov
+; X32:      movb %ah, (%e
+; X32-NOT:      mov
+; X32:      movb %ah, (%e
+; X32-NOT:      mov
+; X32:      movb %ah, (%e
+; X32-NOT:      mov
 
 ; Use h-register extract and store.
 

Modified: llvm/trunk/test/CodeGen/X86/h-registers-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/h-registers-0.ll?rev=127733&r1=127732&r2=127733&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/h-registers-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/h-registers-0.ll Wed Mar 16 08:52:51 2011
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X86-64
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64
+; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
 ; RUN: llc < %s -march=x86    | FileCheck %s -check-prefix=X86-32
 
 ; Use h registers. On x86-64, codegen doesn't support general allocation
@@ -9,6 +10,12 @@
 ; X86-64: shrq $8, %rdi
 ; X86-64: incb %dil
 
+; See FIXME: on regclass GR8.
+; It could be optimally transformed like: incb %ch; movb %ch, (%rdx)
+; WIN64:  bar64:
+; WIN64:  shrq $8, %rcx
+; WIN64:  incb %cl
+
 ; X86-32: bar64:
 ; X86-32: incb %ah
   %t0 = lshr i64 %x, 8
@@ -23,6 +30,10 @@
 ; X86-64: shrl $8, %edi
 ; X86-64: incb %dil
 
+; WIN64:  bar32:
+; WIN64:  shrl $8, %ecx
+; WIN64:  incb %cl
+
 ; X86-32: bar32:
 ; X86-32: incb %ah
   %t0 = lshr i32 %x, 8
@@ -37,6 +48,10 @@
 ; X86-64: shrl $8, %edi
 ; X86-64: incb %dil
 
+; WIN64:  bar16:
+; WIN64:  shrl $8, %ecx
+; WIN64:  incb %cl
+
 ; X86-32: bar16:
 ; X86-32: incb %ah
   %t0 = lshr i16 %x, 8
@@ -51,6 +66,9 @@
 ; X86-64: movq %rdi, %rax
 ; X86-64: movzbl %ah, %eax
 
+; WIN64:  qux64:
+; WIN64:  movzbl %ch, %eax
+
 ; X86-32: qux64:
 ; X86-32: movzbl %ah, %eax
   %t0 = lshr i64 %x, 8
@@ -63,6 +81,9 @@
 ; X86-64: movl %edi, %eax
 ; X86-64: movzbl %ah, %eax
 
+; WIN64:  qux32:
+; WIN64:  movzbl %ch, %eax
+
 ; X86-32: qux32:
 ; X86-32: movzbl %ah, %eax
   %t0 = lshr i32 %x, 8
@@ -75,6 +96,9 @@
 ; X86-64: movl %edi, %eax
 ; X86-64: movzbl %ah, %eax
 
+; WIN64:  qux16:
+; WIN64:  movzbl %ch, %eax
+
 ; X86-32: qux16:
 ; X86-32: movzbl %ah, %eax
   %t0 = lshr i16 %x, 8
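
The h-registers-0.ll checks differ only in which incoming register holds
the argument. The test extracts bits 8-15 of a value; 32-bit x86 can
address that byte directly as %ah, while the 64-bit targets currently
shift and increment the low byte instead (%dil on Linux, %cl on Win64),
because h-registers cannot be encoded together with a REX prefix, which
is what the FIXME about regclass GR8 above refers to. A reduced,
hypothetical form of what the test exercises (not part of this commit):

  define i8 @second_byte(i32 %x) nounwind {
    %hi = lshr i32 %x, 8       ; bits 8-15 of the argument
    %b = trunc i32 %hi to i8   ; on 32-bit x86 this byte is read directly as %ah
    %r = add i8 %b, 1
    ret i8 %r
  }
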

Modified: llvm/trunk/test/CodeGen/X86/peep-vector-extract-concat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/peep-vector-extract-concat.ll?rev=127733&r1=127732&r2=127733&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/peep-vector-extract-concat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/peep-vector-extract-concat.ll Wed Mar 16 08:52:51 2011
@@ -1,4 +1,9 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2,-sse41 | grep {pshufd	\$3, %xmm0, %xmm0}
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2,-sse41 | FileCheck %s
+; CHECK: pshufd $3, %xmm0, %xmm0
+
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2,-sse41 | FileCheck %s -check-prefix=WIN64
+; %a is passed indirectly on Win64.
+; WIN64: movss   12(%rcx), %xmm0
 
 define float @foo(<8 x float> %a) nounwind {
   %c = extractelement <8 x float> %a, i32 3
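
The comment about %a being passed indirectly points at the other recurring
Win64 difference in this commit: vector arguments wider than 64 bits are
passed by reference under the Microsoft x64 convention, so the callee gets
a pointer in %rcx and loads the element it needs (element 3 at offset 12
here), while SysV passes the <8 x float> directly in %xmm0/%xmm1 and the
pshufd peephole applies. A hypothetical sketch of the same effect (not
part of this commit; exact instructions may vary):

  define <4 x i32> @add_vec(<4 x i32> %a, <4 x i32> %b) nounwind {
    ; SysV: %a/%b arrive in %xmm0/%xmm1; Win64: as pointers in %rcx/%rdx,
    ; so codegen loads from memory first, which is what the WIN64 checks
    ; in pmulld.ll and v2f32.ll below expect.
    %r = add <4 x i32> %a, %b
    ret <4 x i32> %r
  }
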

Modified: llvm/trunk/test/CodeGen/X86/pmulld.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmulld.ll?rev=127733&r1=127732&r2=127733&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmulld.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmulld.ll Wed Mar 16 08:52:51 2011
@@ -1,8 +1,13 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse41 -asm-verbose=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse41 -asm-verbose=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse41 -asm-verbose=0 | FileCheck %s -check-prefix=WIN64
 
 define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
 ; CHECK: test1:
 ; CHECK-NEXT: pmulld
+
+; WIN64: test1:
+; WIN64-NEXT: movdqa  (%rcx), %xmm0
+; WIN64-NEXT: pmulld  (%rdx), %xmm0
   %C = mul <4 x i32> %A, %B
   ret <4 x i32> %C
 }
@@ -10,6 +15,11 @@
 define <4 x i32> @test1a(<4 x i32> %A, <4 x i32> *%Bp) nounwind {
 ; CHECK: test1a:
 ; CHECK-NEXT: pmulld
+
+; WIN64: test1a:
+; WIN64-NEXT: movdqa  (%rcx), %xmm0
+; WIN64-NEXT: pmulld  (%rdx), %xmm0
+
   %B = load <4 x i32>* %Bp
   %C = mul <4 x i32> %A, %B
   ret <4 x i32> %C

Modified: llvm/trunk/test/CodeGen/X86/v2f32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/v2f32.ll?rev=127733&r1=127732&r2=127733&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/v2f32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/v2f32.ll Wed Mar 16 08:52:51 2011
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-linux -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=penryn -asm-verbose=0 -o - | FileCheck %s -check-prefix=W64
 ; RUN: llc < %s -mcpu=yonah -march=x86 -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32
 
 ; PR7518
@@ -15,6 +16,13 @@
 ; X64-NEXT: movss	%xmm1, (%rdi)
 ; X64-NEXT: ret
 
+; W64: test1:
+; W64-NEXT: movdqa  (%rcx), %xmm0
+; W64-NEXT: pshufd  $1, %xmm0, %xmm1
+; W64-NEXT: addss   %xmm0, %xmm1
+; W64-NEXT: movss   %xmm1, (%rdx)
+; W64-NEXT: ret
+
 ; X32: test1:
 ; X32-NEXT: pshufd	$1, %xmm0, %xmm1
 ; X32-NEXT: addss	%xmm0, %xmm1
@@ -31,6 +39,14 @@
 ; X64: test2:
 ; X64-NEXT: addps	%xmm1, %xmm0
 ; X64-NEXT: ret
+
+; W64: test2:
+; W64-NEXT: movaps  (%rcx), %xmm0
+; W64-NEXT: addps   (%rdx), %xmm0
+; W64-NEXT: ret
+
+; X32: test2:
+; X32:      addps	%xmm1, %xmm0
 }
 
 
@@ -38,17 +54,35 @@
 	%B = shufflevector <4 x float> %A, <4 x float> undef, <2 x i32> <i32 0, i32 1>
 	%C = fadd <2 x float> %B, %B
 	ret <2 x float> %C
-; CHECK: test3:
-; CHECK-NEXT: 	addps	%xmm0, %xmm0
-; CHECK-NEXT: 	ret
+; X64: test3:
+; X64-NEXT: addps	%xmm0, %xmm0
+; X64-NEXT: ret
+
+; W64: test3:
+; W64-NEXT: movaps  (%rcx), %xmm0
+; W64-NEXT: addps   %xmm0, %xmm0
+; W64-NEXT: ret
+
+; X32: test3:
+; X32-NEXT: addps	%xmm0, %xmm0
+; X32-NEXT: ret
 }
 
 define <2 x float> @test4(<2 x float> %A) nounwind {
 	%C = fadd <2 x float> %A, %A
 	ret <2 x float> %C
-; CHECK: test4:
-; CHECK-NEXT: 	addps	%xmm0, %xmm0
-; CHECK-NEXT: 	ret
+; X64: test4:
+; X64-NEXT: addps	%xmm0, %xmm0
+; X64-NEXT: ret
+
+; W64: test4:
+; W64-NEXT: movaps  (%rcx), %xmm0
+; W64-NEXT: addps   %xmm0, %xmm0
+; W64-NEXT: ret
+
+; X32: test4:
+; X32-NEXT: addps	%xmm0, %xmm0
+; X32-NEXT: ret
 }
 
 define <4 x float> @test5(<4 x float> %A) nounwind {
@@ -61,10 +95,21 @@
 	%E = shufflevector <2 x float> %D, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
 	ret <4 x float> %E
         
-; CHECK: _test5:
-; CHECK-NEXT: 	addps	%xmm0, %xmm0
-; CHECK-NEXT: 	addps	%xmm0, %xmm0
-; CHECK-NEXT: 	ret
+; X64: test5:
+; X64-NEXT: addps	%xmm0, %xmm0
+; X64-NEXT: addps	%xmm0, %xmm0
+; X64-NEXT: ret
+
+; W64: test5:
+; W64-NEXT: movaps  (%rcx), %xmm0
+; W64-NEXT: addps   %xmm0, %xmm0
+; W64-NEXT: addps   %xmm0, %xmm0
+; W64-NEXT: ret
+
+; X32: test5:
+; X32-NEXT: addps	%xmm0, %xmm0
+; X32-NEXT: addps	%xmm0, %xmm0
+; X32-NEXT: ret
 }
 
 

Modified: llvm/trunk/test/CodeGen/X86/widen_load-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-0.ll?rev=127733&r1=127732&r2=127733&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-0.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_load-0.ll Wed Mar 16 08:52:51 2011
@@ -1,4 +1,5 @@
-; RUN: llc < %s -o - -march=x86-64 | FileCheck %s
+; RUN: llc < %s -o - -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -o - -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
 ; PR4891
 
 ; Both loads should happen before either store.
@@ -8,6 +9,11 @@
 ; CHECK: movl  %ecx, (%rdi)
 ; CHECK: movl  %eax, (%rsi)
 
+; WIN64: movl  (%rcx), %eax
+; WIN64: movl  (%rdx), %esi
+; WIN64: movl  %esi, (%rcx)
+; WIN64: movl  %eax, (%rdx)
+
 define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
 entry:
   %0 = load <2 x i16>* %b, align 2                ; <<2 x i16>> [#uses=1]
