[llvm] r278215 - [X86][SSE] Regenerate SSE1 tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 10 05:26:40 PDT 2016
Author: rksimon
Date: Wed Aug 10 07:26:40 2016
New Revision: 278215
URL: http://llvm.org/viewvc/llvm-project?rev=278215&view=rev
Log:
[X86][SSE] Regenerate SSE1 tests
Properly demonstrate the nasty codegen we get for vselect without integer vectors
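The CHECK lines in the diff below were produced with utils/update_llc_test_checks.py (see the NOTE line added to the test). A minimal sketch of the regeneration step, assuming a built llc is on PATH and the command is run from the llvm source root:

  utils/update_llc_test_checks.py test/CodeGen/X86/sse1.ll

The script reads each RUN line in the test, invokes llc with those options, and rewrites the check blocks (here under the X32 and X64 prefixes) from the resulting assembly.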
Modified:
llvm/trunk/test/CodeGen/X86/sse1.ll
Modified: llvm/trunk/test/CodeGen/X86/sse1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse1.ll?rev=278215&r1=278214&r2=278215&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse1.ll Wed Aug 10 07:26:40 2016
@@ -1,6 +1,7 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Tests for SSE1 and below, without SSE2+.
-; RUN: llc < %s -mtriple=i386-unknown-unknown -march=x86 -mcpu=pentium3 -O3 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -march=x86-64 -mattr=-sse2,+sse -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=pentium3 -O3 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2,+sse -O3 | FileCheck %s --check-prefix=X64
; PR7993
;define <4 x i32> @test3(<4 x i16> %a) nounwind {
@@ -12,15 +13,25 @@
; vector that this ends up returning.
; rdar://8368414
define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
-; CHECK-LABEL: test4:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: movaps %xmm0, %xmm2
-; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; CHECK-NEXT: addss %xmm1, %xmm0
-; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; CHECK-NEXT: subss %xmm1, %xmm2
-; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; CHECK-NEXT: ret
+; X32-LABEL: test4:
+; X32: # BB#0: # %entry
+; X32-NEXT: movaps %xmm0, %xmm2
+; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X32-NEXT: addss %xmm1, %xmm0
+; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; X32-NEXT: subss %xmm1, %xmm2
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test4:
+; X64: # BB#0: # %entry
+; X64-NEXT: movaps %xmm0, %xmm2
+; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X64-NEXT: addss %xmm1, %xmm0
+; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; X64-NEXT: subss %xmm1, %xmm2
+; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-NEXT: retq
entry:
%tmp7 = extractelement <2 x float> %A, i32 0
%tmp5 = extractelement <2 x float> %A, i32 1
@@ -40,8 +51,79 @@ entry:
; PR18036
define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
-; CHECK-LABEL: vselect:
-; CHECK: ret
+; X32-LABEL: vselect:
+; X32: # BB#0: # %entry
+; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: je .LBB1_1
+; X32-NEXT: # BB#2: # %entry
+; X32-NEXT: xorps %xmm1, %xmm1
+; X32-NEXT: jmp .LBB1_3
+; X32-NEXT: .LBB1_1:
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: .LBB1_3: # %entry
+; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB1_4
+; X32-NEXT: # BB#5: # %entry
+; X32-NEXT: xorps %xmm2, %xmm2
+; X32-NEXT: jmp .LBB1_6
+; X32-NEXT: .LBB1_4:
+; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: .LBB1_6: # %entry
+; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: je .LBB1_7
+; X32-NEXT: # BB#8: # %entry
+; X32-NEXT: xorps %xmm3, %xmm3
+; X32-NEXT: jmp .LBB1_9
+; X32-NEXT: .LBB1_7:
+; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: .LBB1_9: # %entry
+; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X32-NEXT: jne .LBB1_11
+; X32-NEXT: # BB#10:
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: .LBB1_11: # %entry
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-NEXT: retl
+;
+; X64-LABEL: vselect:
+; X64: # BB#0: # %entry
+; X64-NEXT: testl %ecx, %ecx
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: je .LBB1_1
+; X64-NEXT: # BB#2: # %entry
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: jmp .LBB1_3
+; X64-NEXT: .LBB1_1:
+; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: .LBB1_3: # %entry
+; X64-NEXT: testl %edx, %edx
+; X64-NEXT: je .LBB1_4
+; X64-NEXT: # BB#5: # %entry
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: jmp .LBB1_6
+; X64-NEXT: .LBB1_4:
+; X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-NEXT: .LBB1_6: # %entry
+; X64-NEXT: testl %r8d, %r8d
+; X64-NEXT: je .LBB1_7
+; X64-NEXT: # BB#8: # %entry
+; X64-NEXT: xorps %xmm3, %xmm3
+; X64-NEXT: jmp .LBB1_9
+; X64-NEXT: .LBB1_7:
+; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X64-NEXT: .LBB1_9: # %entry
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X64-NEXT: jne .LBB1_11
+; X64-NEXT: # BB#10:
+; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: .LBB1_11: # %entry
+; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-NEXT: retq
entry:
%a1 = icmp eq <4 x i32> %q, zeroinitializer
%a14 = select <4 x i1> %a1, <4 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00> , <4 x float> zeroinitializer
@@ -51,11 +133,15 @@ entry:
; v4i32 isn't legal for SSE1, but this should be cmpps.
define <4 x float> @PR28044(<4 x float> %a0, <4 x float> %a1) nounwind {
-; CHECK-LABEL: PR28044:
-; CHECK: # BB#0:
-; CHECK-NEXT: cmpeqps %xmm1, %xmm0
-; CHECK-NEXT: ret
+; X32-LABEL: PR28044:
+; X32: # BB#0:
+; X32-NEXT: cmpeqps %xmm1, %xmm0
+; X32-NEXT: retl
;
+; X64-LABEL: PR28044:
+; X64: # BB#0:
+; X64-NEXT: cmpeqps %xmm1, %xmm0
+; X64-NEXT: retq
%cmp = fcmp oeq <4 x float> %a0, %a1
%sext = sext <4 x i1> %cmp to <4 x i32>
%res = bitcast <4 x i32> %sext to <4 x float>
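The X32/X64 blocks above show the codegen this commit now captures: with only SSE1 there is no legal v4i32 type, so the <4 x i1> select condition cannot stay in a vector register and each lane is handled with a scalar compare-and-branch before the results are re-packed with unpcklps. To reproduce the output locally (a sketch mirroring the 64-bit RUN line above, assuming a built llc):

  llc < test/CodeGen/X86/sse1.ll -mtriple=x86_64-unknown-unknown -mattr=-sse2,+sse -O3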