[llvm] r279630 - [X86][SSE] Regenerate scalar math load folding tests for 32 and 64 bit targets

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Aug 24 08:07:12 PDT 2016


Author: rksimon
Date: Wed Aug 24 10:07:11 2016
New Revision: 279630

URL: http://llvm.org/viewvc/llvm-project?rev=279630&view=rev
Log:
[X86][SSE] Regenerate scalar math load folding tests for 32 and 64 bit targets
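
For reference, assertions in this style come from the script named in the test's NOTE line, utils/update_llc_test_checks.py, run once per RUN line. A minimal sketch of regenerating them against a locally built llc (the --llc-binary flag and the ./build path are assumptions about the local setup, not part of this commit):

    # Regenerate the FileCheck lines for every RUN line in the test.
    # Run from the llvm source root; adjust the build path as needed.
    python utils/update_llc_test_checks.py --llc-binary ./build/bin/llc \
        test/CodeGen/X86/vec_ss_load_fold.ll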

Modified:
    llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll

Modified: llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll?rev=279630&r1=279629&r2=279630&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll Wed Aug 24 10:07:11 2016
@@ -1,21 +1,31 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s
-
-target datalayout = "e-p:32:32"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=X64
 
 define i16 @test1(float %f) nounwind {
-; CHECK-LABEL: test1:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    subss LCPI0_0, %xmm0
-; CHECK-NEXT:    mulss LCPI0_1, %xmm0
-; CHECK-NEXT:    minss LCPI0_2, %xmm0
-; CHECK-NEXT:    maxss %xmm1, %xmm0
-; CHECK-NEXT:    cvttss2si %xmm0, %eax
-; CHECK-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
-; CHECK-NEXT:    retl
+; X32-LABEL: test1:
+; X32:       ## BB#0:
+; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    xorps %xmm1, %xmm1
+; X32-NEXT:    subss LCPI0_0, %xmm0
+; X32-NEXT:    mulss LCPI0_1, %xmm0
+; X32-NEXT:    minss LCPI0_2, %xmm0
+; X32-NEXT:    maxss %xmm1, %xmm0
+; X32-NEXT:    cvttss2si %xmm0, %eax
+; X32-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT:    retl
 ;
+; X64-LABEL: test1:
+; X64:       ## BB#0:
+; X64-NEXT:    xorps %xmm1, %xmm1
+; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X64-NEXT:    subss {{.*}}(%rip), %xmm0
+; X64-NEXT:    mulss {{.*}}(%rip), %xmm0
+; X64-NEXT:    minss {{.*}}(%rip), %xmm0
+; X64-NEXT:    maxss %xmm1, %xmm0
+; X64-NEXT:    cvttss2si %xmm0, %eax
+; X64-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT:    retq
   %tmp = insertelement <4 x float> undef, float %f, i32 0		; <<4 x float>> [#uses=1]
   %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1		; <<4 x float>> [#uses=1]
   %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2		; <<4 x float>> [#uses=1]
@@ -30,18 +40,28 @@ define i16 @test1(float %f) nounwind {
 }
 
 define i16 @test2(float %f) nounwind {
-; CHECK-LABEL: test2:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    addss LCPI1_0, %xmm0
-; CHECK-NEXT:    mulss LCPI1_1, %xmm0
-; CHECK-NEXT:    minss LCPI1_2, %xmm0
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    maxss %xmm1, %xmm0
-; CHECK-NEXT:    cvttss2si %xmm0, %eax
-; CHECK-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
-; CHECK-NEXT:    retl
+; X32-LABEL: test2:
+; X32:       ## BB#0:
+; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    addss LCPI1_0, %xmm0
+; X32-NEXT:    mulss LCPI1_1, %xmm0
+; X32-NEXT:    minss LCPI1_2, %xmm0
+; X32-NEXT:    xorps %xmm1, %xmm1
+; X32-NEXT:    maxss %xmm1, %xmm0
+; X32-NEXT:    cvttss2si %xmm0, %eax
+; X32-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT:    retl
 ;
+; X64-LABEL: test2:
+; X64:       ## BB#0:
+; X64-NEXT:    addss {{.*}}(%rip), %xmm0
+; X64-NEXT:    mulss {{.*}}(%rip), %xmm0
+; X64-NEXT:    minss {{.*}}(%rip), %xmm0
+; X64-NEXT:    xorps %xmm1, %xmm1
+; X64-NEXT:    maxss %xmm1, %xmm0
+; X64-NEXT:    cvttss2si %xmm0, %eax
+; X64-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT:    retq
   %tmp28 = fsub float %f, 1.000000e+00		; <float> [#uses=1]
   %tmp37 = fmul float %tmp28, 5.000000e-01		; <float> [#uses=1]
   %tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0		; <<4 x float>> [#uses=1]
@@ -67,12 +87,16 @@ declare <4 x float> @llvm.x86.sse41.roun
 declare <4 x float> @f()
 
 define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
-; CHECK-LABEL: test3:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    roundss $4, (%eax), %xmm0
-; CHECK-NEXT:    retl
+; X32-LABEL: test3:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    roundss $4, (%eax), %xmm0
+; X32-NEXT:    retl
 ;
+; X64-LABEL: test3:
+; X64:       ## BB#0:
+; X64-NEXT:    roundss $4, (%rdi), %xmm0
+; X64-NEXT:    retq
   %a = load float , float *%b
   %B = insertelement <4 x float> undef, float %a, i32 0
   %X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %A, <4 x float> %B, i32 4)
@@ -80,18 +104,28 @@ define <4 x float> @test3(<4 x float> %A
 }
 
 define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
-; CHECK-LABEL: test4:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    subl $28, %esp
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    movaps %xmm0, (%esp) ## 16-byte Spill
-; CHECK-NEXT:    calll _f
-; CHECK-NEXT:    movaps (%esp), %xmm1 ## 16-byte Reload
-; CHECK-NEXT:    roundss $4, %xmm1, %xmm0
-; CHECK-NEXT:    addl $28, %esp
-; CHECK-NEXT:    retl
+; X32-LABEL: test4:
+; X32:       ## BB#0:
+; X32-NEXT:    subl $28, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    movaps %xmm0, (%esp) ## 16-byte Spill
+; X32-NEXT:    calll _f
+; X32-NEXT:    movaps (%esp), %xmm1 ## 16-byte Reload
+; X32-NEXT:    roundss $4, %xmm1, %xmm0
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
 ;
+; X64-LABEL: test4:
+; X64:       ## BB#0:
+; X64-NEXT:    subq $24, %rsp
+; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT:    callq _f
+; X64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; X64-NEXT:    roundss $4, %xmm1, %xmm0
+; X64-NEXT:    addq $24, %rsp
+; X64-NEXT:    retq
   %a = load float , float *%b
   %B = insertelement <4 x float> undef, float %a, i32 0
   %q = call <4 x float> @f()
@@ -101,16 +135,21 @@ define <4 x float> @test4(<4 x float> %A
 
 ; PR13576
 define  <2 x double> @test5() nounwind uwtable readnone noinline {
-; CHECK-LABEL: test5:
-; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
-; CHECK-NEXT:    movl $128, %eax
-; CHECK-NEXT:    cvtsi2sdl %eax, %xmm0
-; CHECK-NEXT:    retl
+; X32-LABEL: test5:
+; X32:       ## BB#0: ## %entry
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
+; X32-NEXT:    movl $128, %eax
+; X32-NEXT:    cvtsi2sdl %eax, %xmm0
+; X32-NEXT:    retl
 ;
+; X64-LABEL: test5:
+; X64:       ## BB#0: ## %entry
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
+; X64-NEXT:    movl $128, %eax
+; X64-NEXT:    cvtsi2sdl %eax, %xmm0
+; X64-NEXT:    retq
 entry:
-  %0 = tail call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> <double
-4.569870e+02, double 1.233210e+02>, i32 128) nounwind readnone
+  %0 = tail call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> <double 4.569870e+02, double 1.233210e+02>, i32 128) nounwind readnone
   ret <2 x double> %0
 }
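
Assuming a configured build tree, the updated test can be re-run on its own with llvm-lit; a hypothetical invocation (the ./build path is illustrative):

    # Run just this test and print each RUN line as it executes.
    ./build/bin/llvm-lit -v test/CodeGen/X86/vec_ss_load_fold.ll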
 



