[llvm-commits] [llvm] r122955 - in /llvm/trunk: lib/Target/X86/X86ISelLowering.cpp test/CodeGen/X86/2010-04-08-CoalescerBug.ll test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll test/CodeGen/X86/memcpy-2.ll test/CodeGen/X86/memcpy.ll test/CodeGen/X86/memset-2.ll test/CodeGen/X86/memset64-on-x86-32.ll test/CodeGen/X86/small-byval-memcpy.ll test/CodeGen/X86/tlv-1.ll test/CodeGen/X86/unaligned-load.ll

Evan Cheng evan.cheng at apple.com
Wed Jan 5 23:58:36 PST 2011


Author: evancheng
Date: Thu Jan  6 01:58:36 2011
New Revision: 122955

URL: http://llvm.org/viewvc/llvm-project?rev=122955&view=rev
Log:
Use movups to lower memcpy and memset even on subtargets where unaligned
memory access is not fast (i.e. chips other than corei7). The theory is that a
single movups is still faster than a pair of movq (or a quad of movl on
32-bit). This will probably hurt older chips like the P4 but should run faster
on current and future Intel processors. rdar://8817010
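
For illustration, a minimal IR reproducer (hypothetical, not part of this
commit) of the kind of copy affected: with -mtriple=x86_64-apple-darwin
-mcpu=core2 a 16-byte memcpy with only 8-byte alignment previously lowered to
two movq load/store pairs and now lowers to a movups load plus a movups store,
which is what the memcpy-2.ll update below checks. The intrinsic signature
matches the one used in test/CodeGen/X86/memcpy.ll.

  ; Illustrative only: 16-byte memcpy, 8-byte alignment.
  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

  define void @copy16(i8* nocapture %dst, i8* nocapture %src) nounwind {
  entry:
    ; Before this change: movq (%rsi), movq 8(%rsi), then two movq stores.
    ; After this change:  movups (%rsi), %xmm0 / movups %xmm0, (%rdi).
    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i32 8, i1 false)
    ret void
  }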

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
    llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
    llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
    llvm/trunk/test/CodeGen/X86/memcpy-2.ll
    llvm/trunk/test/CodeGen/X86/memcpy.ll
    llvm/trunk/test/CodeGen/X86/memset-2.ll
    llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll
    llvm/trunk/test/CodeGen/X86/small-byval-memcpy.ll
    llvm/trunk/test/CodeGen/X86/tlv-1.ll
    llvm/trunk/test/CodeGen/X86/unaligned-load.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Jan  6 01:58:36 2011
@@ -1063,12 +1063,8 @@
   // linux.  This is because the stack realignment code can't handle certain
   // cases like PR2962.  This should be removed when PR2962 is fixed.
   const Function *F = MF.getFunction();
-  if (NonScalarIntSafe &&
-      !F->hasFnAttr(Attribute::NoImplicitFloat)) {
+  if (NonScalarIntSafe && !F->hasFnAttr(Attribute::NoImplicitFloat)) {
     if (Size >= 16 &&
-        (Subtarget->isUnalignedMemAccessFast() ||
-         ((DstAlign == 0 || DstAlign >= 16) &&
-          (SrcAlign == 0 || SrcAlign >= 16))) &&
         Subtarget->getStackAlignment() >= 16) {
       if (Subtarget->hasSSE2())
         return MVT::v4i32;

Modified: llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll Thu Jan  6 01:58:36 2011
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
 ; rdar://7842028
 
 ; Do not delete partially dead copy instructions.
@@ -9,7 +9,7 @@
 %struct.F = type { %struct.FC*, i32, i32, i8, i32, i32, i32 }
 %struct.FC = type { [10 x i8], [32 x i32], %struct.FC*, i32 }
 
-define void @t(%struct.F* %this) nounwind {
+define void @t(%struct.F* %this) nounwind optsize {
 entry:
 ; CHECK: t:
 ; CHECK: addq $12, %rsi

Modified: llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll Thu Jan  6 01:58:36 2011
@@ -26,7 +26,7 @@
 ; CHECK: rep;stosl
 
   %tmp5 = bitcast i32* %tmp4 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 84, i32 4, i1 false)
+  call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 124, i32 4, i1 false)
   %tmp6 = getelementptr inbounds %struct.type* %s, i32 0, i32 62
   store i32* null, i32** %tmp6, align 8
   br label %bb1

Modified: llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll Thu Jan  6 01:58:36 2011
@@ -19,8 +19,8 @@
 }
 
 ; CHECK: movq	___stack_chk_guard at GOTPCREL(%rip), %rax
-; CHECK: movb	30(%rsp), %dl
-; CHECK: movb	(%rsp), %sil
-; CHECK: movb	%sil, (%rsp)
-; CHECK: movb	%dl, 30(%rsp)
+; CHECK: movb	30(%rsp), %cl
+; CHECK: movb	(%rsp), %dl
+; CHECK: movb	%dl, (%rsp)
+; CHECK: movb	%cl, 30(%rsp)
 ; CHECK: callq	___stack_chk_fail

Modified: llvm/trunk/test/CodeGen/X86/memcpy-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcpy-2.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcpy-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memcpy-2.ll Thu Jan  6 01:58:36 2011
@@ -1,5 +1,4 @@
 ; RUN: llc < %s -mattr=+sse2      -mtriple=i686-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=SSE2
-; RUN: llc < %s -mattr=+sse,-sse2 -mtriple=i686-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=SSE1
 ; RUN: llc < %s -mattr=-sse       -mtriple=i686-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=NOSSE
 ; RUN: llc < %s                 -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=X86-64
 
@@ -15,13 +14,6 @@
 ; SSE2: movl $0
 ; SSE2: movl $0
 
-; SSE1: t1:
-; SSE1: movaps _.str, %xmm0
-; SSE1: movaps %xmm0
-; SSE1: movb $0
-; SSE1: movl $0
-; SSE1: movl $0
-
 ; NOSSE: t1:
 ; NOSSE: movb $0
 ; NOSSE: movl $0
@@ -51,10 +43,6 @@
 ; SSE2: movaps (%eax), %xmm0
 ; SSE2: movaps %xmm0, (%eax)
 
-; SSE1: t2:
-; SSE1: movaps (%eax), %xmm0
-; SSE1: movaps %xmm0, (%eax)
-
 ; NOSSE: t2:
 ; NOSSE: movl
 ; NOSSE: movl
@@ -79,22 +67,8 @@
 define void @t3(%struct.s0* nocapture %a, %struct.s0* nocapture %b) nounwind ssp {
 entry:
 ; SSE2: t3:
-; SSE2: movsd (%eax), %xmm0
-; SSE2: movsd 8(%eax), %xmm1
-; SSE2: movsd %xmm1, 8(%eax)
-; SSE2: movsd %xmm0, (%eax)
-
-; SSE1: t3:
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
-; SSE1: movl
+; SSE2: movups (%eax), %xmm0
+; SSE2: movups %xmm0, (%eax)
 
 ; NOSSE: t3:
 ; NOSSE: movl
@@ -109,10 +83,8 @@
 ; NOSSE: movl
 
 ; X86-64: t3:
-; X86-64: movq (%rsi), %rax
-; X86-64: movq 8(%rsi), %rcx
-; X86-64: movq %rcx, 8(%rdi)
-; X86-64: movq %rax, (%rdi)
+; X86-64: movups (%rsi), %xmm0
+; X86-64: movups %xmm0, (%rdi)
   %tmp2 = bitcast %struct.s0* %a to i8*           ; <i8*> [#uses=1]
   %tmp3 = bitcast %struct.s0* %b to i8*           ; <i8*> [#uses=1]
   tail call void @llvm.memcpy.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 8)
@@ -122,24 +94,12 @@
 define void @t4() nounwind {
 entry:
 ; SSE2: t4:
-; SSE2: movw $120
-; SSE2: movl $2021161080
-; SSE2: movl $2021161080
-; SSE2: movl $2021161080
+; SSE2: movups _.str2, %xmm0
+; SSE2: movaps %xmm0, (%esp)
+; SSE2: movw $120, 28(%esp)
 ; SSE2: movl $2021161080
 ; SSE2: movl $2021161080
 ; SSE2: movl $2021161080
-; SSE2: movl $2021161080
-
-; SSE1: t4:
-; SSE1: movw $120
-; SSE1: movl $2021161080
-; SSE1: movl $2021161080
-; SSE1: movl $2021161080
-; SSE1: movl $2021161080
-; SSE1: movl $2021161080
-; SSE1: movl $2021161080
-; SSE1: movl $2021161080
 
 ; NOSSE: t4:
 ; NOSSE: movw $120
@@ -154,8 +114,8 @@
 ; X86-64: t4:
 ; X86-64: movabsq $8680820740569200760, %rax
 ; X86-64: movq %rax
-; X86-64: movq %rax
-; X86-64: movq %rax
+; X86-64: movups _.str2(%rip), %xmm0
+; X86-64: movaps %xmm0, -40(%rsp)
 ; X86-64: movw $120
 ; X86-64: movl $2021161080
   %tmp1 = alloca [30 x i8]

Modified: llvm/trunk/test/CodeGen/X86/memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcpy.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcpy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memcpy.ll Thu Jan  6 01:58:36 2011
@@ -37,26 +37,34 @@
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
   ret void
 ; LINUX: test3:
-; LINUX: memcpy
+; LINUX-NOT: memcpy
+; LINUX: movups
+; LINUX: movups
+; LINUX: movups
+; LINUX: movups
+; LINUX: movups
+; LINUX: movups
+; LINUX: movups
+; LINUX: movups
 
 ; DARWIN: test3:
 ; DARWIN-NOT: memcpy
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
-; DARWIN: movq
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
+; DARWIN: movups
 }
 
 ; Large constant memcpy's should be inlined when not optimizing for size.

Modified: llvm/trunk/test/CodeGen/X86/memset-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset-2.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset-2.ll Thu Jan  6 01:58:36 2011
@@ -5,7 +5,21 @@
 define fastcc void @t1() nounwind {
 entry:
 ; CHECK: t1:
-; CHECK: calll _memset
+; CHECK: pxor %xmm0, %xmm0
+; CHECK: movups %xmm0, 160
+; CHECK: movups %xmm0, 144
+; CHECK: movups %xmm0, 128
+; CHECK: movups %xmm0, 112
+; CHECK: movups %xmm0, 96
+; CHECK: movups %xmm0, 80
+; CHECK: movups %xmm0, 64
+; CHECK: movups %xmm0, 48
+; CHECK: movups %xmm0, 32
+; CHECK: movups %xmm0, 16
+; CHECK: movups %xmm0, 0
+; CHECK: movl $0, 184
+; CHECK: movl $0, 180
+; CHECK: movl $0, 176
   call void @llvm.memset.i32( i8* null, i8 0, i32 188, i32 1 ) nounwind
   unreachable
 }

Modified: llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset64-on-x86-32.ll Thu Jan  6 01:58:36 2011
@@ -1,6 +1,5 @@
 ; RUN: llc < %s -mtriple=i386-apple-darwin   -mcpu=nehalem | grep movups | count 5
-; RUN: llc < %s -mtriple=i386-apple-darwin   -mcpu=core2   | grep movl   | count 20
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2   | grep movq   | count 10
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2   | grep movups   | count 5
 
 define void @bork() nounwind {
 entry:

Modified: llvm/trunk/test/CodeGen/X86/small-byval-memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/small-byval-memcpy.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/small-byval-memcpy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/small-byval-memcpy.ll Thu Jan  6 01:58:36 2011
@@ -1,8 +1,12 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=core2   | grep movsd  | count 8
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=nehalem | grep movups | count 2
+; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=nehalem | FileCheck %s
 
 define void @ccosl({ x86_fp80, x86_fp80 }* noalias sret  %agg.result, { x86_fp80, x86_fp80 }* byval align 4  %z) nounwind  {
 entry:
+; CHECK: ccosl:
+; CHECK: movaps
+; CHECK: movaps
+; CHECK: movups
+; CHECK: movups
 	%iz = alloca { x86_fp80, x86_fp80 }		; <{ x86_fp80, x86_fp80 }*> [#uses=3]
 	%tmp1 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 1		; <x86_fp80*> [#uses=1]
 	%tmp2 = load x86_fp80* %tmp1, align 16		; <x86_fp80> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/tlv-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tlv-1.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tlv-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tlv-1.ll Thu Jan  6 01:58:36 2011
@@ -10,8 +10,12 @@
   unreachable  
   ; CHECK: movq    _c at TLVP(%rip), %rdi
   ; CHECK-NEXT: callq   *(%rdi)
-  ; CHECK-NEXT: movl    $0, 56(%rax)
-  ; CHECK-NEXT: movq    $0, 48(%rax)
+  ; CHECK-NEXT: pxor	%xmm0, %xmm0
+  ; CHECK-NEXT: movups  %xmm0, 32(%rax)
+  ; CHECK-NEXT: movups  %xmm0, 16(%rax)
+  ; CHECK-NEXT: movups  %xmm0, (%rax)
+  ; CHECK-NEXT: movl $0, 56(%rax)
+  ; CHECK-NEXT: movq $0, 48(%rax)
 }
 
 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

Modified: llvm/trunk/test/CodeGen/X86/unaligned-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/unaligned-load.ll?rev=122955&r1=122954&r2=122955&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/unaligned-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/unaligned-load.ll Thu Jan  6 01:58:36 2011
@@ -1,6 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10.0 -mcpu=core2  -relocation-model=dynamic-no-pic --asm-verbose=0   | FileCheck -check-prefix=I386 %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -mcpu=core2  -relocation-model=dynamic-no-pic --asm-verbose=0 | FileCheck -check-prefix=CORE2 %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -mcpu=corei7 -relocation-model=dynamic-no-pic --asm-verbose=0 | FileCheck -check-prefix=COREI7 %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -mcpu=core2  -relocation-model=dynamic-no-pic --asm-verbose=0 | FileCheck %s
 
 @.str1 = internal constant [31 x i8] c"DHRYSTONE PROGRAM, SOME STRING\00", align 8
 @.str3 = internal constant [31 x i8] c"DHRYSTONE PROGRAM, 2'ND STRING\00", align 8
@@ -13,13 +11,8 @@
 bb:
   %String2Loc9 = getelementptr inbounds [31 x i8]* %String2Loc, i64 0, i64 0
   call void @llvm.memcpy.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8]* @.str3, i64 0, i64 0), i64 31, i32 1)
-; I386: calll {{_?}}memcpy
-
-; CORE2: movabsq
-; CORE2: movabsq
-; CORE2: movabsq
-
-; COREI7: movups _.str3
+; CHECK: movabsq $2325069237881678925, %rax
+; CHECK: movups _.str3(%rip), %xmm0
   br label %bb
 
 return:
@@ -28,9 +21,9 @@
 
 declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
 
-; CORE2: .section
-; CORE2: .align  4
-; CORE2-NEXT: _.str1:
-; CORE2-NEXT: .asciz "DHRYSTONE PROGRAM, SOME STRING"
-; CORE2: .align 4
-; CORE2-NEXT: _.str3:
+; CHECK: .section
+; CHECK: .align  4
+; CHECK-NEXT: _.str1:
+; CHECK-NEXT: .asciz "DHRYSTONE PROGRAM, SOME STRING"
+; CHECK: .align 4
+; CHECK-NEXT: _.str3:





More information about the llvm-commits mailing list