[llvm] r218947 - [x86] Cleanup and generate precise FileCheck assertions for a bunch of SSE tests.
Chandler Carruth
chandlerc@gmail.com
Thu Oct 2 18:37:59 PDT 2014
Author: chandlerc
Date: Thu Oct 2 20:37:58 2014
New Revision: 218947
URL: http://llvm.org/viewvc/llvm-project?rev=218947&view=rev
Log:
[x86] Cleanup and generate precise FileCheck assertions for a bunch of
SSE tests.
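
For reference, the new assertion style pins the entire body of each function:
one CHECK-LABEL per function, a check for the entry basic-block marker
("# BB#0:" for the plain x86-64 runs, "## BB#0:" for the Darwin triples), and a
CHECK-NEXT line for every instruction through the return, with regexes such as
{{[0-9]+}}(%esp) for stack offsets and {{.*#+}} for the shuffle-mask asm
comments. A minimal sketch of the pattern, modeled on the @c test in
sse-align-12.ll below (the function name and IR body here are illustrative and
use the pre-3.7 typed-pointer load syntax these tests use; the checks in the
patch were presumably produced by a script along the lines of
utils/update_llc_test_checks.py, which is an assumption, since the log does not
say how they were generated):

; RUN: llc < %s -march=x86-64 -mcpu=nehalem | FileCheck %s

; Swap the two lanes of an unaligned <2 x double> load; the checks assert the
; exact instruction sequence rather than a loose subset of opcodes.
define <2 x double> @swap(<2 x double>* %y) nounwind {
; CHECK-LABEL: swap:
; CHECK:       # BB#0:
; CHECK-NEXT:    movupd (%rdi), %xmm0
; CHECK-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT:    retq
  %x = load <2 x double>* %y, align 8
  %a = extractelement <2 x double> %x, i32 0
  %b = extractelement <2 x double> %x, i32 1
  %lo = insertelement <2 x double> undef, double %b, i32 0
  %swapped = insertelement <2 x double> %lo, double %a, i32 1
  ret <2 x double> %swapped
}

Because every instruction is matched with CHECK-NEXT, any change in the code
generated for these functions now shows up as a test failure instead of
slipping past looser CHECK lines.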
Modified:
llvm/trunk/test/CodeGen/X86/sse-align-12.ll
llvm/trunk/test/CodeGen/X86/sse2.ll
llvm/trunk/test/CodeGen/X86/sse3.ll
llvm/trunk/test/CodeGen/X86/sse41.ll
Modified: llvm/trunk/test/CodeGen/X86/sse-align-12.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-align-12.ll?rev=218947&r1=218946&r2=218947&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-align-12.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-align-12.ll Thu Oct 2 20:37:58 2014
@@ -1,9 +1,11 @@
; RUN: llc < %s -march=x86-64 -mcpu=nehalem | FileCheck %s
-; CHECK-LABEL: a:
-; CHECK: movdqu
-; CHECK: pshufd
define <4 x float> @a(<4 x float>* %y) nounwind {
+; CHECK-LABEL: a:
+; CHECK: # BB#0:
+; CHECK-NEXT: movdqu (%rdi), %xmm0
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
+; CHECK-NEXT: retq
%x = load <4 x float>* %y, align 4
%a = extractelement <4 x float> %x, i32 0
%b = extractelement <4 x float> %x, i32 1
@@ -16,10 +18,12 @@ define <4 x float> @a(<4 x float>* %y) n
ret <4 x float> %s
}
-; CHECK-LABEL: b:
-; CHECK: movups
-; CHECK: unpckhps
define <4 x float> @b(<4 x float>* %y, <4 x float> %z) nounwind {
+; CHECK-LABEL: b:
+; CHECK: # BB#0:
+; CHECK-NEXT: movups (%rdi), %xmm1
+; CHECK-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT: retq
%x = load <4 x float>* %y, align 4
%a = extractelement <4 x float> %x, i32 2
%b = extractelement <4 x float> %x, i32 3
@@ -32,10 +36,12 @@ define <4 x float> @b(<4 x float>* %y, <
ret <4 x float> %s
}
-; CHECK-LABEL: c:
-; CHECK: movupd
-; CHECK: shufpd
define <2 x double> @c(<2 x double>* %y) nounwind {
+; CHECK-LABEL: c:
+; CHECK: # BB#0:
+; CHECK-NEXT: movupd (%rdi), %xmm0
+; CHECK-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; CHECK-NEXT: retq
%x = load <2 x double>* %y, align 8
%a = extractelement <2 x double> %x, i32 0
%c = extractelement <2 x double> %x, i32 1
@@ -44,10 +50,12 @@ define <2 x double> @c(<2 x double>* %y)
ret <2 x double> %r
}
-; CHECK-LABEL: d:
-; CHECK: movupd
-; CHECK: unpckhpd
define <2 x double> @d(<2 x double>* %y, <2 x double> %z) nounwind {
+; CHECK-LABEL: d:
+; CHECK: # BB#0:
+; CHECK-NEXT: movupd (%rdi), %xmm1
+; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; CHECK-NEXT: retq
%x = load <2 x double>* %y, align 8
%a = extractelement <2 x double> %x, i32 1
%c = extractelement <2 x double> %z, i32 1
Modified: llvm/trunk/test/CodeGen/X86/sse2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2.ll?rev=218947&r1=218946&r2=218947&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2.ll Thu Oct 2 20:37:58 2014
@@ -2,39 +2,48 @@
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=pentium4 -O3 | FileCheck %s
define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
+; CHECK-LABEL: test1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movapd (%ecx), %xmm0
+; CHECK-NEXT: movlpd {{[0-9]+}}(%esp), %xmm0
+; CHECK-NEXT: movapd %xmm0, (%eax)
+; CHECK-NEXT: retl
%tmp3 = load <2 x double>* %A, align 16
%tmp7 = insertelement <2 x double> undef, double %B, i32 0
%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 2, i32 1 >
store <2 x double> %tmp9, <2 x double>* %r, align 16
ret void
-
-; CHECK-LABEL: test1:
-; CHECK: movl 4(%esp), %eax
-; CHECK-NEXT: movl 8(%esp), %ecx
-; CHECK-NEXT: movapd (%ecx), %xmm0
-; CHECK-NEXT: movlpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%eax)
-; CHECK-NEXT: ret
}
define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
+; CHECK-LABEL: test2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movapd (%ecx), %xmm0
+; CHECK-NEXT: movhpd {{[0-9]+}}(%esp), %xmm0
+; CHECK-NEXT: movapd %xmm0, (%eax)
+; CHECK-NEXT: retl
%tmp3 = load <2 x double>* %A, align 16
%tmp7 = insertelement <2 x double> undef, double %B, i32 0
%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 0, i32 2 >
store <2 x double> %tmp9, <2 x double>* %r, align 16
ret void
-
-; CHECK-LABEL: test2:
-; CHECK: movl 4(%esp), %eax
-; CHECK: movl 8(%esp), %ecx
-; CHECK-NEXT: movapd (%ecx), %xmm0
-; CHECK-NEXT: movhpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%eax)
-; CHECK-NEXT: ret
}
define void @test3(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) nounwind {
+; CHECK-LABEL: test3:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: movaps (%edx), %xmm0
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; CHECK-NEXT: movaps %xmm0, (%eax)
+; CHECK-NEXT: retl
%tmp = load <4 x float>* %B ; <<4 x float>> [#uses=2]
%tmp3 = load <4 x float>* %A ; <<4 x float>> [#uses=2]
%tmp.upgrd.1 = extractelement <4 x float> %tmp3, i32 0 ; <float> [#uses=1]
@@ -47,24 +56,30 @@ define void @test3(<4 x float>* %res, <4
%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp13, <4 x float>* %res
ret void
-; CHECK: @test3
-; CHECK: unpcklps
}
define void @test4(<4 x float> %X, <4 x float>* %res) nounwind {
+; CHECK-LABEL: test4:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
+; CHECK-NEXT: movdqa %xmm0, (%eax)
+; CHECK-NEXT: retl
%tmp5 = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=1]
store <4 x float> %tmp5, <4 x float>* %res
ret void
-; CHECK: @test4
-; CHECK: pshufd $50, %xmm0, %xmm0
}
define <4 x i32> @test5(i8** %ptr) nounwind {
; CHECK-LABEL: test5:
-; CHECK: pxor
-; CHECK: punpcklbw
-; CHECK: punpcklwd
-
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %eax
+; CHECK-NEXT: movss (%eax), %xmm1
+; CHECK-NEXT: pxor %xmm0, %xmm0
+; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT: retl
%tmp = load i8** %ptr ; <i8*> [#uses=1]
%tmp.upgrd.1 = bitcast i8* %tmp to float* ; <float*> [#uses=1]
%tmp.upgrd.2 = load float* %tmp.upgrd.1 ; <float> [#uses=1]
@@ -81,30 +96,39 @@ define <4 x i32> @test5(i8** %ptr) nounw
}
define void @test6(<4 x float>* %res, <4 x float>* %A) nounwind {
- %tmp1 = load <4 x float>* %A ; <<4 x float>> [#uses=1]
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp2, <4 x float>* %res
- ret void
-
; CHECK-LABEL: test6:
-; CHECK: movaps (%ecx), %xmm0
-; CHECK: movaps %xmm0, (%eax)
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movaps (%ecx), %xmm0
+; CHECK-NEXT: movaps %xmm0, (%eax)
+; CHECK-NEXT: retl
+ %tmp1 = load <4 x float>* %A ; <<4 x float>> [#uses=1]
+ %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
+ store <4 x float> %tmp2, <4 x float>* %res
+ ret void
}
define void @test7() nounwind {
- bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:1 [#uses=1]
- shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> zeroinitializer ; <<4 x float>>:2 [#uses=1]
- store <4 x float> %2, <4 x float>* null
- ret void
-
; CHECK-LABEL: test7:
-; CHECK: xorps %xmm0, %xmm0
-; CHECK: movaps %xmm0, 0
+; CHECK: ## BB#0:
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: movaps %xmm0, 0
+; CHECK-NEXT: retl
+ bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:1 [#uses=1]
+ shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> zeroinitializer ; <<4 x float>>:2 [#uses=1]
+ store <4 x float> %2, <4 x float>* null
+ ret void
}
@x = external global [4 x i32]
define <2 x i64> @test8() nounwind {
+; CHECK-LABEL: test8:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl L_x$non_lazy_ptr, %eax
+; CHECK-NEXT: movups (%eax), %xmm0
+; CHECK-NEXT: retl
%tmp = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 0) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 1) ; <i32> [#uses=1]
%tmp5 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 2) ; <i32> [#uses=1]
@@ -115,90 +139,123 @@ define <2 x i64> @test8() nounwind {
%tmp15 = insertelement <4 x i32> %tmp14, i32 %tmp7, i32 3 ; <<4 x i32>> [#uses=1]
%tmp16 = bitcast <4 x i32> %tmp15 to <2 x i64> ; <<2 x i64>> [#uses=1]
ret <2 x i64> %tmp16
-; CHECK-LABEL: test8:
-; CHECK: movups (%eax), %xmm0
}
define <4 x float> @test9(i32 %dummy, float %a, float %b, float %c, float %d) nounwind {
+; CHECK-LABEL: test9:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movups {{[0-9]+}}(%esp), %xmm0
+; CHECK-NEXT: retl
%tmp = insertelement <4 x float> undef, float %a, i32 0 ; <<4 x float>> [#uses=1]
%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1 ; <<4 x float>> [#uses=1]
%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2 ; <<4 x float>> [#uses=1]
%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3 ; <<4 x float>> [#uses=1]
ret <4 x float> %tmp13
-; CHECK-LABEL: test9:
-; CHECK: movups 8(%esp), %xmm0
}
define <4 x float> @test10(float %a, float %b, float %c, float %d) nounwind {
+; CHECK-LABEL: test10:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movaps {{[0-9]+}}(%esp), %xmm0
+; CHECK-NEXT: retl
%tmp = insertelement <4 x float> undef, float %a, i32 0 ; <<4 x float>> [#uses=1]
%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1 ; <<4 x float>> [#uses=1]
%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2 ; <<4 x float>> [#uses=1]
%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3 ; <<4 x float>> [#uses=1]
ret <4 x float> %tmp13
-; CHECK-LABEL: test10:
-; CHECK: movaps 4(%esp), %xmm0
}
define <2 x double> @test11(double %a, double %b) nounwind {
+; CHECK-LABEL: test11:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movaps {{[0-9]+}}(%esp), %xmm0
+; CHECK-NEXT: retl
%tmp = insertelement <2 x double> undef, double %a, i32 0 ; <<2 x double>> [#uses=1]
%tmp7 = insertelement <2 x double> %tmp, double %b, i32 1 ; <<2 x double>> [#uses=1]
ret <2 x double> %tmp7
-; CHECK-LABEL: test11:
-; CHECK: movaps 4(%esp), %xmm0
}
define void @test12() nounwind {
- %tmp1 = load <4 x float>* null ; <<4 x float>> [#uses=2]
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- %tmp4 = fadd <4 x float> %tmp2, %tmp3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4, <4 x float>* null
- ret void
; CHECK-LABEL: test12:
-; CHECK: movhlps
-; CHECK: shufps
+; CHECK: ## BB#0:
+; CHECK-NEXT: movaps 0, %xmm0
+; CHECK-NEXT: xorps %xmm1, %xmm1
+; CHECK-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; CHECK-NEXT: addps %xmm1, %xmm0
+; CHECK-NEXT: movaps %xmm0, 0
+; CHECK-NEXT: retl
+ %tmp1 = load <4 x float>* null ; <<4 x float>> [#uses=2]
+ %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
+ %tmp4 = fadd <4 x float> %tmp2, %tmp3 ; <<4 x float>> [#uses=1]
+ store <4 x float> %tmp4, <4 x float>* null
+ ret void
}
define void @test13(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
- %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=1]
- %tmp5 = load <4 x float>* %C ; <<4 x float>> [#uses=1]
- %tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp11, <4 x float>* %res
- ret void
-; CHECK: test13
-; CHECK: shufps $69, (%ecx), %xmm0
-; CHECK: pshufd $-40, %xmm0, %xmm0
+; CHECK-LABEL: test13:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: movaps (%edx), %xmm0
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],mem[0,1]
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; CHECK-NEXT: movdqa %xmm0, (%eax)
+; CHECK-NEXT: retl
+ %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=1]
+ %tmp5 = load <4 x float>* %C ; <<4 x float>> [#uses=1]
+ %tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
+ store <4 x float> %tmp11, <4 x float>* %res
+ ret void
}
define <4 x float> @test14(<4 x float>* %x, <4 x float>* %y) nounwind {
- %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=2]
- %tmp5 = load <4 x float>* %x ; <<4 x float>> [#uses=2]
- %tmp9 = fadd <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
- %tmp21 = fsub <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
- %tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp27
; CHECK-LABEL: test14:
-; CHECK: addps [[X1:%xmm[0-9]+]], [[X0:%xmm[0-9]+]]
-; CHECK: subps [[X1]], [[X2:%xmm[0-9]+]]
-; CHECK: movlhps [[X2]], [[X0]]
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movaps (%ecx), %xmm1
+; CHECK-NEXT: movaps (%eax), %xmm2
+; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: addps %xmm1, %xmm0
+; CHECK-NEXT: subps %xmm1, %xmm2
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT: retl
+ %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=2]
+ %tmp5 = load <4 x float>* %x ; <<4 x float>> [#uses=2]
+ %tmp9 = fadd <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
+ %tmp21 = fsub <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
+ %tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x float>> [#uses=1]
+ ret <4 x float> %tmp27
}
define <4 x float> @test15(<4 x float>* %x, <4 x float>* %y) nounwind {
-entry:
- %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp4
; CHECK-LABEL: test15:
-; CHECK: movhlps %xmm1, %xmm0
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movaps (%ecx), %xmm0
+; CHECK-NEXT: movaps (%eax), %xmm1
+; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
+; CHECK-NEXT: retl
+entry:
+ %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>* %x ; <<4 x float>> [#uses=1]
+ %tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
+ ret <4 x float> %tmp4
}
; PR8900
-; CHECK-LABEL: test16:
-; CHECK: unpcklpd
-; CHECK: ret
define <2 x double> @test16(<4 x double> * nocapture %srcA, <2 x double>* nocapture %dst) {
+; CHECK-LABEL: test16:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movapd 96(%eax), %xmm0
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT: retl
%i5 = getelementptr inbounds <4 x double>* %srcA, i32 3
%i6 = load <4 x double>* %i5, align 32
%i7 = shufflevector <4 x double> %i6, <4 x double> undef, <2 x i32> <i32 0, i32 2>
@@ -207,6 +264,11 @@ define <2 x double> @test16(<4 x double
; PR9009
define fastcc void @test17() nounwind {
+; CHECK-LABEL: test17:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = <u,u,32768,32768>
+; CHECK-NEXT: movaps %xmm0, (%eax)
+; CHECK-NEXT: retl
entry:
%0 = insertelement <4 x i32> undef, i32 undef, i32 1
%1 = shufflevector <4 x i32> <i32 undef, i32 undef, i32 32768, i32 32768>, <4 x i32> %0, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
@@ -217,25 +279,57 @@ entry:
; PR9210
define <4 x float> @f(<4 x double>) nounwind {
+; CHECK-LABEL: f:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: cvtpd2ps %xmm1, %xmm1
+; CHECK-NEXT: cvtpd2ps %xmm0, %xmm0
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retl
entry:
%double2float.i = fptrunc <4 x double> %0 to <4 x float>
ret <4 x float> %double2float.i
}
define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
-; CHECK-LABEL: test_insert_64_zext
-; CHECK-NOT: xor
-; CHECK: movq
+; CHECK-LABEL: test_insert_64_zext:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movq %xmm0, %xmm0
+; CHECK-NEXT: retl
%1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %1
}
define <4 x i32> @PR19721(<4 x i32> %i) {
+; CHECK-LABEL: PR19721:
+; CHECK: ## BB#0:
+; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,0,0]
+; CHECK-NEXT: movd %xmm1, %eax
+; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,0,0,0]
+; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; CHECK-NEXT: movd %xmm0, %ecx
+; CHECK-NEXT: movd %xmm1, %edx
+; CHECK-NEXT: movd %edx, %xmm0
+; CHECK-NEXT: movd %ecx, %xmm1
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
+; CHECK-NEXT: retl
%bc = bitcast <4 x i32> %i to i128
%insert = and i128 %bc, -4294967296
%bc2 = bitcast i128 %insert to <4 x i32>
ret <4 x i32> %bc2
+}
-; CHECK-LABEL: PR19721
-; CHECK: punpckldq
+define <4 x i32> @test_mul(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: test_mul:
+; CHECK: ## BB#0:
+; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,0,3,0]
+; CHECK-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; CHECK-NEXT: pmuludq %xmm2, %xmm1
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; CHECK-NEXT: retl
+ %m = mul <4 x i32> %x, %y
+ ret <4 x i32> %m
}
Modified: llvm/trunk/test/CodeGen/X86/sse3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse3.ll?rev=218947&r1=218946&r2=218947&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse3.ll Thu Oct 2 20:37:58 2014
@@ -1,12 +1,17 @@
; These are tests for SSE3 codegen.
-; RUN: llc < %s -march=x86-64 -mcpu=nocona -mtriple=i686-apple-darwin9 -O3 \
-; RUN: | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -march=x86-64 -mcpu=nocona -mtriple=i686-apple-darwin9 -O3 | FileCheck %s --check-prefix=X64
; Test for v8xi16 lowering where we extract the first element of the vector and
; placed it in the second element of the result.
define void @t0(<8 x i16>* %dest, <8 x i16>* %old) nounwind {
+; X64-LABEL: t0:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movdqa (%rsi), %xmm0
+; X64-NEXT: pslldq $2, %xmm0
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
entry:
%tmp3 = load <8 x i16>* %old
%tmp6 = shufflevector <8 x i16> %tmp3,
@@ -15,85 +20,94 @@ entry:
store <8 x i16> %tmp6, <8 x i16>* %dest
ret void
-; X64-LABEL: t0:
-; X64: movdqa (%rsi), %xmm0
-; X64: pslldq $2, %xmm0
-; X64: movdqa %xmm0, (%rdi)
-; X64: ret
}
define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+; X64-LABEL: t1:
+; X64: ## BB#0:
+; X64-NEXT: movdqa (%rdi), %xmm0
+; X64-NEXT: pinsrw $0, (%rsi), %xmm0
+; X64-NEXT: retq
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> < i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
ret <8 x i16> %tmp3
-; X64-LABEL: t1:
-; X64: movdqa (%rdi), %xmm0
-; X64: pinsrw $0, (%rsi), %xmm0
-; X64: ret
}
define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
+; X64-LABEL: t2:
+; X64: ## BB#0:
+; X64-NEXT: pextrw $1, %xmm1, %eax
+; X64-NEXT: pinsrw $0, %eax, %xmm0
+; X64-NEXT: pinsrw $3, %eax, %xmm0
+; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 9, i32 1, i32 2, i32 9, i32 4, i32 5, i32 6, i32 7 >
ret <8 x i16> %tmp
-; X64-LABEL: t2:
-; X64: pextrw $1, %xmm1, %eax
-; X64: pinsrw $0, %eax, %xmm0
-; X64: pinsrw $3, %eax, %xmm0
-; X64: ret
}
define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
+; X64-LABEL: t3:
+; X64: ## BB#0:
+; X64-NEXT: pextrw $5, %xmm0, %eax
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,0,4,5,6,7]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; X64-NEXT: pinsrw $3, %eax, %xmm0
+; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %A, <8 x i32> < i32 8, i32 3, i32 2, i32 13, i32 7, i32 6, i32 5, i32 4 >
ret <8 x i16> %tmp
-; X64-LABEL: t3:
-; X64: pextrw $5, %xmm0, %eax
-; X64: pshuflw $44, %xmm0, %xmm0
-; X64: pshufhw $27, %xmm0, %xmm0
-; X64: pinsrw $3, %eax, %xmm0
-; X64: ret
}
define <8 x i16> @t4(<8 x i16> %A, <8 x i16> %B) nounwind {
+; X64-LABEL: t4:
+; X64: ## BB#0:
+; X64-NEXT: pextrw $7, %xmm0, %eax
+; X64-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,6,5]
+; X64-NEXT: pinsrw $1, %eax, %xmm1
+; X64-NEXT: pextrw $1, %xmm0, %eax
+; X64-NEXT: pinsrw $4, %eax, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 7, i32 2, i32 3, i32 1, i32 5, i32 6, i32 5 >
ret <8 x i16> %tmp
-; X64-LABEL: t4:
-; X64: pextrw $7, [[XMM0:%xmm[0-9]+]], %eax
-; X64: pshufhw $100, [[XMM0]], [[XMM1:%xmm[0-9]+]]
-; X64: pinsrw $1, %eax, [[XMM1]]
-; X64: pextrw $1, [[XMM0]], %eax
-; X64: pinsrw $4, %eax, %xmm{{[0-9]}}
-; X64: ret
}
define <8 x i16> @t5(<8 x i16> %A, <8 x i16> %B) nounwind {
+; X64-LABEL: t5:
+; X64: ## BB#0:
+; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 0, i32 1, i32 10, i32 11, i32 2, i32 3 >
ret <8 x i16> %tmp
-; X64: t5:
-; X64: movlhps %xmm1, %xmm0
-; X64: pshufd $114, %xmm0, %xmm0
-; X64: ret
}
define <8 x i16> @t6(<8 x i16> %A, <8 x i16> %B) nounwind {
+; X64-LABEL: t6:
+; X64: ## BB#0:
+; X64-NEXT: movss %xmm1, %xmm0
+; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
ret <8 x i16> %tmp
-; X64: t6:
-; X64: movss %xmm1, %xmm0
-; X64: ret
}
define <8 x i16> @t7(<8 x i16> %A, <8 x i16> %B) nounwind {
+; X64-LABEL: t7:
+; X64: ## BB#0:
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,4,7]
+; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 0, i32 3, i32 2, i32 4, i32 6, i32 4, i32 7 >
ret <8 x i16> %tmp
-; X64: t7:
-; X64: pshuflw $-80, %xmm0, %xmm0
-; X64: pshufhw $-56, %xmm0, %xmm0
-; X64: ret
}
define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
+; X64-LABEL: t8:
+; X64: ## BB#0:
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[2,1,0,3,4,5,6,7]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
%tmp = load <2 x i64>* %A
%tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16>
%tmp0 = extractelement <8 x i16> %tmp.upgrd.1, i32 0
@@ -115,14 +129,15 @@ define void @t8(<2 x i64>* %res, <2 x i6
%tmp15.upgrd.2 = bitcast <8 x i16> %tmp15 to <2 x i64>
store <2 x i64> %tmp15.upgrd.2, <2 x i64>* %res
ret void
-; X64: t8:
-; X64: pshuflw $-58, (%rsi), %xmm0
-; X64: pshufhw $-58, %xmm0, %xmm0
-; X64: movdqa %xmm0, (%rdi)
-; X64: ret
}
define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
+; X64-LABEL: t9:
+; X64: ## BB#0:
+; X64-NEXT: movaps (%rdi), %xmm0
+; X64-NEXT: movhps (%rsi), %xmm0
+; X64-NEXT: movaps %xmm0, (%rdi)
+; X64-NEXT: retq
%tmp = load <4 x float>* %r
%tmp.upgrd.3 = bitcast <2 x i32>* %A to double*
%tmp.upgrd.4 = load double* %tmp.upgrd.3
@@ -139,11 +154,6 @@ define void @t9(<4 x float>* %r, <2 x i3
%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3
store <4 x float> %tmp13, <4 x float>* %r
ret void
-; X64: t9:
-; X64: movaps (%rdi), %xmm0
-; X64: movhps (%rsi), %xmm0
-; X64: movaps %xmm0, (%rdi)
-; X64: ret
}
@@ -154,113 +164,123 @@ define void @t9(<4 x float>* %r, <2 x i3
@g1 = external constant <4 x i32>
@g2 = external constant <4 x i16>
-define internal void @t10() nounwind {
- load <4 x i32>* @g1, align 16
- bitcast <4 x i32> %1 to <8 x i16>
- shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> < i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef >
- bitcast <8 x i16> %3 to <2 x i64>
- extractelement <2 x i64> %4, i32 0
- bitcast i64 %5 to <4 x i16>
- store <4 x i16> %6, <4 x i16>* @g2, align 8
- ret void
-; X64: t10:
-; X64: pextrw $4, [[X0:%xmm[0-9]+]], %e{{..}}
-; X64: pextrw $6, [[X0]], %e{{..}}
-; X64: movlhps [[X0]], [[X0]]
-; X64: pshuflw $8, [[X0]], [[X0]]
-; X64: pinsrw $2, %e{{..}}, [[X0]]
-; X64: pinsrw $3, %e{{..}}, [[X0]]
+define void @t10() nounwind {
+; X64-LABEL: t10:
+; X64: ## BB#0:
+; X64-NEXT: movq _g1@{{.*}}(%rip), %rax
+; X64-NEXT: movdqa (%rax), %xmm0
+; X64-NEXT: pextrw $4, %xmm0, %eax
+; X64-NEXT: pextrw $6, %xmm0, %ecx
+; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,0,0,4,5,6,7]
+; X64-NEXT: pinsrw $2, %eax, %xmm0
+; X64-NEXT: pinsrw $3, %ecx, %xmm0
+; X64-NEXT: movq _g2@{{.*}}(%rip), %rax
+; X64-NEXT: movq %xmm0, (%rax)
+; X64-NEXT: retq
+ load <4 x i32>* @g1, align 16
+ bitcast <4 x i32> %1 to <8 x i16>
+ shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> < i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef >
+ bitcast <8 x i16> %3 to <2 x i64>
+ extractelement <2 x i64> %4, i32 0
+ bitcast i64 %5 to <4 x i16>
+ store <4 x i16> %6, <4 x i16>* @g2, align 8
+ ret void
}
-
; Pack various elements via shuffles.
define <8 x i16> @t11(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
+; X64-LABEL: t11:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movd %xmm1, %eax
+; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,0,0,4,5,6,7]
+; X64-NEXT: pinsrw $1, %eax, %xmm0
+; X64-NEXT: retq
entry:
%tmp7 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
ret <8 x i16> %tmp7
-; X64-LABEL: t11:
-; X64: movd %xmm1, %eax
-; X64: movlhps %xmm0, %xmm0
-; X64: pshuflw $1, %xmm0, %xmm0
-; X64: pinsrw $1, %eax, %xmm0
-; X64: ret
}
-
define <8 x i16> @t12(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
+; X64-LABEL: t12:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pextrw $3, %xmm1, %eax
+; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,4,4,4]
+; X64-NEXT: pinsrw $5, %eax, %xmm0
+; X64-NEXT: retq
entry:
%tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 1, i32 undef, i32 undef, i32 3, i32 11, i32 undef , i32 undef >
ret <8 x i16> %tmp9
-; X64-LABEL: t12:
-; X64: pextrw $3, %xmm1, %eax
-; X64: movlhps %xmm0, %xmm0
-; X64: pshufhw $3, %xmm0, %xmm0
-; X64: pinsrw $5, %eax, %xmm0
-; X64: ret
}
-
define <8 x i16> @t13(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
+; X64-LABEL: t13:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT: pextrw $3, %xmm1, %eax
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,7,4,4]
+; X64-NEXT: pinsrw $4, %eax, %xmm0
+; X64-NEXT: retq
entry:
%tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 11, i32 3, i32 undef , i32 undef >
ret <8 x i16> %tmp9
-; X64-LABEL: t13:
-; X64: punpcklqdq %xmm0, %xmm1
-; X64: pextrw $3, %xmm1, %eax
-; X64: pshufhw $12, %xmm1, %xmm0
-; X64: pinsrw $4, %eax, %xmm0
-; X64: ret
}
-
define <8 x i16> @t14(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
+; X64-LABEL: t14:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,6,4,4]
+; X64-NEXT: retq
entry:
%tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 2, i32 undef , i32 undef >
ret <8 x i16> %tmp9
-; X64-LABEL: t14:
-; X64: punpcklqdq %xmm0, %xmm1
-; X64: pshufhw $8, %xmm1, %xmm0
-; X64: ret
}
-
; FIXME: t15 is worse off from disabling of scheduler 2-address hack.
define <8 x i16> @t15(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
+; X64-LABEL: t15:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pextrw $7, %xmm0, %eax
+; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,2,4,5,6,7]
+; X64-NEXT: pinsrw $2, %eax, %xmm0
+; X64-NEXT: retq
entry:
- %tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp8
-; X64: t15:
-; X64: pextrw $7, %xmm0, %eax
-; X64: punpcklqdq %xmm1, %xmm0
-; X64: pshuflw $-128, %xmm0, %xmm0
-; X64: pinsrw $2, %eax, %xmm0
-; X64: ret
+ %tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
+ ret <8 x i16> %tmp8
}
-
; Test yonah where we convert a shuffle to pextrw and pinrsw
define <16 x i8> @t16(<16 x i8> %T0) nounwind readnone {
+; X64-LABEL: t16:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pextrw $8, %xmm0, %eax
+; X64-NEXT: pslldq $2, %xmm0
+; X64-NEXT: andl $65280, %eax ## imm = 0xFF00
+; X64-NEXT: pextrw $1, %xmm0, %ecx
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: pinsrw $1, %ecx, %xmm0
+; X64-NEXT: retq
entry:
- %tmp8 = shufflevector <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- %tmp9 = shufflevector <16 x i8> %tmp8, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 2, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <16 x i8> %tmp9
-; X64: t16:
-; X64: pextrw $8, %xmm0, %eax
-; X64: pslldq $2, %xmm0
-; X64: pextrw $1, %xmm0, %ecx
-; X64: movzbl %cl, %ecx
-; X64: orl %eax, %ecx
-; X64: pinsrw $1, %ecx, %xmm0
-; X64: ret
+ %tmp8 = shufflevector <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
+ %tmp9 = shufflevector <16 x i8> %tmp8, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 2, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
+ ret <16 x i8> %tmp9
}
; rdar://8520311
define <4 x i32> @t17() nounwind {
-entry:
; X64-LABEL: t17:
-; X64: movddup (%rax), %xmm0
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movddup (%rax), %xmm0
+; X64-NEXT: andpd {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+entry:
%tmp1 = load <4 x float>* undef, align 16
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
%tmp3 = load <4 x float>* undef, align 16
Modified: llvm/trunk/test/CodeGen/X86/sse41.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41.ll?rev=218947&r1=218946&r2=218947&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41.ll Thu Oct 2 20:37:58 2014
@@ -1,30 +1,47 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s -check-prefix=X32 --check-prefix=CHECK
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s -check-prefix=X64 --check-prefix=CHECK
+; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X64
@g16 = external global i16
define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
- %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
- ret <4 x i32> %tmp1
; X32-LABEL: pinsrd_1:
-; X32: pinsrd $1, 4(%esp), %xmm0
-
+; X32: ## BB#0:
+; X32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: retl
+;
; X64-LABEL: pinsrd_1:
-; X64: pinsrd $1, %edi, %xmm0
+; X64: ## BB#0:
+; X64-NEXT: pinsrd $1, %edi, %xmm0
+; X64-NEXT: retq
+ %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
+ ret <4 x i32> %tmp1
}
define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
- %tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
- ret <16 x i8> %tmp1
; X32-LABEL: pinsrb_1:
-; X32: pinsrb $1, 4(%esp), %xmm0
-
+; X32: ## BB#0:
+; X32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: retl
+;
; X64-LABEL: pinsrb_1:
-; X64: pinsrb $1, %edi, %xmm0
+; X64: ## BB#0:
+; X64-NEXT: pinsrb $1, %edi, %xmm0
+; X64-NEXT: retq
+ %tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
+ ret <16 x i8> %tmp1
}
-
define <2 x i64> @pmovsxbd_1(i32* %p) nounwind {
+; X32-LABEL: pmovsxbd_1:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pmovsxbd (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: pmovsxbd_1:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pmovsxbd (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%0 = load i32* %p, align 4
%1 = insertelement <4 x i32> undef, i32 %0, i32 0
@@ -35,16 +52,19 @@ entry:
%6 = tail call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %5) nounwind readnone
%7 = bitcast <4 x i32> %6 to <2 x i64>
ret <2 x i64> %7
-
-; X32: _pmovsxbd_1:
-; X32: movl 4(%esp), %eax
-; X32: pmovsxbd (%eax), %xmm0
-
-; X64: _pmovsxbd_1:
-; X64: pmovsxbd (%rdi), %xmm0
}
define <2 x i64> @pmovsxwd_1(i64* %p) nounwind readonly {
+; X32-LABEL: pmovsxwd_1:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pmovsxwd (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: pmovsxwd_1:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pmovsxwd (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%0 = load i64* %p ; <i64> [#uses=1]
%tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0 ; <<2 x i64>> [#uses=1]
@@ -52,63 +72,59 @@ entry:
%2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone ; <<4 x i32>> [#uses=1]
%3 = bitcast <4 x i32> %2 to <2 x i64> ; <<2 x i64>> [#uses=1]
ret <2 x i64> %3
-
-; X32: _pmovsxwd_1:
-; X32: movl 4(%esp), %eax
-; X32: pmovsxwd (%eax), %xmm0
-
-; X64: _pmovsxwd_1:
-; X64: pmovsxwd (%rdi), %xmm0
}
-
-
-
define <2 x i64> @pmovzxbq_1() nounwind {
+; X32-LABEL: pmovzxbq_1:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl L_g16$non_lazy_ptr, %eax
+; X32-NEXT: pmovzxbq (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: pmovzxbq_1:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movq _g16@{{.*}}(%rip), %rax
+; X64-NEXT: pmovzxbq (%rax), %xmm0
+; X64-NEXT: retq
entry:
%0 = load i16* @g16, align 2 ; <i16> [#uses=1]
%1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
%2 = bitcast <8 x i16> %1 to <16 x i8> ; <<16 x i8>> [#uses=1]
%3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
ret <2 x i64> %3
-
-; X32: _pmovzxbq_1:
-; X32: movl L_g16$non_lazy_ptr, %eax
-; X32: pmovzxbq (%eax), %xmm0
-
-; X64: _pmovzxbq_1:
-; X64: movq _g16@GOTPCREL(%rip), %rax
-; X64: pmovzxbq (%rax), %xmm0
}
declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
-
-
-
define i32 @extractps_1(<4 x float> %v) nounwind {
+; X32-LABEL: extractps_1:
+; X32: ## BB#0:
+; X32-NEXT: extractps $3, %xmm0, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: extractps_1:
+; X64: ## BB#0:
+; X64-NEXT: extractps $3, %xmm0, %eax
+; X64-NEXT: retq
%s = extractelement <4 x float> %v, i32 3
%i = bitcast float %s to i32
ret i32 %i
-
-; X32: _extractps_1:
-; X32: extractps $3, %xmm0, %eax
-
-; X64: _extractps_1:
-; X64: extractps $3, %xmm0, %eax
}
define i32 @extractps_2(<4 x float> %v) nounwind {
+; X32-LABEL: extractps_2:
+; X32: ## BB#0:
+; X32-NEXT: extractps $3, %xmm0, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: extractps_2:
+; X64: ## BB#0:
+; X64-NEXT: extractps $3, %xmm0, %eax
+; X64-NEXT: retq
%t = bitcast <4 x float> %v to <4 x i32>
%s = extractelement <4 x i32> %t, i32 3
ret i32 %s
-
-; X32: _extractps_2:
-; X32: extractps $3, %xmm0, %eax
-
-; X64: _extractps_2:
-; X64: extractps $3, %xmm0, %eax
}
@@ -117,106 +133,152 @@ define i32 @extractps_2(<4 x float> %v)
; is bitcasted to i32, but unsuitable for much of anything else.
define float @ext_1(<4 x float> %v) nounwind {
+; X32-LABEL: ext_1:
+; X32: ## BB#0:
+; X32-NEXT: pushl %eax
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
+; X32-NEXT: addss LCPI7_0, %xmm0
+; X32-NEXT: movss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: ext_1:
+; X64: ## BB#0:
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
+; X64-NEXT: addss {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%s = extractelement <4 x float> %v, i32 3
%t = fadd float %s, 1.0
ret float %t
-
-; X32: _ext_1:
-; X32: pshufd $3, %xmm0, %xmm0
-; X32: addss LCPI7_0, %xmm0
-
-; X64: _ext_1:
-; X64: pshufd $3, %xmm0, %xmm0
-; X64: addss LCPI7_0(%rip), %xmm0
}
define float @ext_2(<4 x float> %v) nounwind {
+; X32-LABEL: ext_2:
+; X32: ## BB#0:
+; X32-NEXT: pushl %eax
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
+; X32-NEXT: movss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: ext_2:
+; X64: ## BB#0:
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
+; X64-NEXT: retq
%s = extractelement <4 x float> %v, i32 3
ret float %s
-
-; X32: _ext_2:
-; X32: pshufd $3, %xmm0, %xmm0
-
-; X64: _ext_2:
-; X64: pshufd $3, %xmm0, %xmm0
}
define i32 @ext_3(<4 x i32> %v) nounwind {
+; X32-LABEL: ext_3:
+; X32: ## BB#0:
+; X32-NEXT: pextrd $3, %xmm0, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: ext_3:
+; X64: ## BB#0:
+; X64-NEXT: pextrd $3, %xmm0, %eax
+; X64-NEXT: retq
%i = extractelement <4 x i32> %v, i32 3
ret i32 %i
-
-; X32: _ext_3:
-; X32: pextrd $3, %xmm0, %eax
-
-; X64: _ext_3:
-; X64: pextrd $3, %xmm0, %eax
}
define <4 x float> @insertps_1(<4 x float> %t1, <4 x float> %t2) nounwind {
- %tmp1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %t1, <4 x float> %t2, i32 1) nounwind readnone
- ret <4 x float> %tmp1
-; X32: _insertps_1:
-; X32: insertps $1, %xmm1, %xmm0
-
-; X64: _insertps_1:
-; X64: insertps $1, %xmm1, %xmm0
+; X32-LABEL: insertps_1:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1,2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_1:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1,2,3]
+; X64-NEXT: retq
+ %tmp1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %t1, <4 x float> %t2, i32 1) nounwind readnone
+ ret <4 x float> %tmp1
}
declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind {
- %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
- ret <4 x float> %tmp1
-; X32: _insertps_2:
-; X32: insertps $0, 4(%esp), %xmm0
-
-; X64: _insertps_2:
-; X64: insertps $0, %xmm1, %xmm0
+; X32-LABEL: insertps_2:
+; X32: ## BB#0:
+; X32-NEXT: insertps $0, {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_2:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT: retq
+ %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
+ ret <4 x float> %tmp1
}
-
define <4 x float> @insertps_3(<4 x float> %t1, <4 x float> %t2) nounwind {
- %tmp2 = extractelement <4 x float> %t2, i32 0
- %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
- ret <4 x float> %tmp1
-; X32: _insertps_3:
-; X32: insertps $0, %xmm1, %xmm0
-
-; X64: _insertps_3:
-; X64: insertps $0, %xmm1, %xmm0
+; X32-LABEL: insertps_3:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_3:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT: retq
+ %tmp2 = extractelement <4 x float> %t2, i32 0
+ %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
+ ret <4 x float> %tmp1
}
define i32 @ptestz_1(<2 x i64> %t1, <2 x i64> %t2) nounwind {
- %tmp1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
- ret i32 %tmp1
-; X32: _ptestz_1:
-; X32: ptest %xmm1, %xmm0
-; X32: sete %al
-
-; X64: _ptestz_1:
-; X64: ptest %xmm1, %xmm0
-; X64: sete %al
+; X32-LABEL: ptestz_1:
+; X32: ## BB#0:
+; X32-NEXT: ptest %xmm1, %xmm0
+; X32-NEXT: sete %al
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: ptestz_1:
+; X64: ## BB#0:
+; X64-NEXT: ptest %xmm1, %xmm0
+; X64-NEXT: sete %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: retq
+ %tmp1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
+ ret i32 %tmp1
}
define i32 @ptestz_2(<2 x i64> %t1, <2 x i64> %t2) nounwind {
- %tmp1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
- ret i32 %tmp1
-; X32: _ptestz_2:
-; X32: ptest %xmm1, %xmm0
-; X32: sbbl %eax
-
-; X64: _ptestz_2:
-; X64: ptest %xmm1, %xmm0
-; X64: sbbl %eax
+; X32-LABEL: ptestz_2:
+; X32: ## BB#0:
+; X32-NEXT: ptest %xmm1, %xmm0
+; X32-NEXT: sbbl %eax, %eax
+; X32-NEXT: andl $1, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: ptestz_2:
+; X64: ## BB#0:
+; X64-NEXT: ptest %xmm1, %xmm0
+; X64-NEXT: sbbl %eax, %eax
+; X64-NEXT: andl $1, %eax
+; X64-NEXT: retq
+ %tmp1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
+ ret i32 %tmp1
}
define i32 @ptestz_3(<2 x i64> %t1, <2 x i64> %t2) nounwind {
- %tmp1 = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
- ret i32 %tmp1
-; X32: _ptestz_3:
-; X32: ptest %xmm1, %xmm0
-; X32: seta %al
-
-; X64: _ptestz_3:
-; X64: ptest %xmm1, %xmm0
-; X64: seta %al
+; X32-LABEL: ptestz_3:
+; X32: ## BB#0:
+; X32-NEXT: ptest %xmm1, %xmm0
+; X32-NEXT: seta %al
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: ptestz_3:
+; X64: ## BB#0:
+; X64-NEXT: ptest %xmm1, %xmm0
+; X64-NEXT: seta %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: retq
+ %tmp1 = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
+ ret i32 %tmp1
}
@@ -227,6 +289,23 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2
; This used to compile to insertps $0 + insertps $16. insertps $0 is always
; pointless.
define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
+; X32-LABEL: buildvector:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,0,0,0]
+; X32-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,0,0]
+; X32-NEXT: addss %xmm1, %xmm0
+; X32-NEXT: addss %xmm2, %xmm3
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: buildvector:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,0,0,0]
+; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,0,0]
+; X64-NEXT: addss %xmm1, %xmm0
+; X64-NEXT: addss %xmm2, %xmm3
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
+; X64-NEXT: retq
entry:
%tmp7 = extractelement <2 x float> %A, i32 0
%tmp5 = extractelement <2 x float> %A, i32 1
@@ -237,84 +316,105 @@ entry:
%tmp11 = insertelement <2 x float> undef, float %add.r, i32 0
%tmp9 = insertelement <2 x float> %tmp11, float %add.i, i32 1
ret <2 x float> %tmp9
-; X32-LABEL: buildvector:
-; X32-NOT: insertps $0
-; X32: insertps $16
-; X32-NOT: insertps $0
-; X32: ret
-; X64-LABEL: buildvector:
-; X64-NOT: insertps $0
-; X64: insertps $16
-; X64-NOT: insertps $0
-; X64: ret
}
define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+; X32-LABEL: insertps_from_shufflevector_1:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: insertps $48, (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_shufflevector_1:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: insertps $48, (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%0 = load <4 x float>* %pb, align 16
%vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
ret <4 x float> %vecinit6
-; CHECK-LABEL: insertps_from_shufflevector_1:
-; CHECK-NOT: movss
-; CHECK-NOT: shufps
-; CHECK: insertps $48,
-; CHECK: ret
}
define <4 x float> @insertps_from_shufflevector_2(<4 x float> %a, <4 x float> %b) {
+; X32-LABEL: insertps_from_shufflevector_2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_shufflevector_2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
+; X64-NEXT: retq
entry:
%vecinit6 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
ret <4 x float> %vecinit6
-; CHECK-LABEL: insertps_from_shufflevector_2:
-; CHECK-NOT: shufps
-; CHECK: insertps $96,
-; CHECK: ret
}
; For loading an i32 from memory into an xmm register we use pinsrd
; instead of insertps
define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocapture readonly %pb) {
+; X32-LABEL: pinsrd_from_shufflevector_i32:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: pinsrd $3, (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: pinsrd_from_shufflevector_i32:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pinsrd $3, (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%0 = load <4 x i32>* %pb, align 16
%vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
ret <4 x i32> %vecinit6
-; CHECK-LABEL: pinsrd_from_shufflevector_i32:
-; CHECK-NOT: movss
-; CHECK-NOT: shufps
-; CHECK: pinsrd $3,
-; CHECK: ret
}
define <4 x i32> @insertps_from_shufflevector_i32_2(<4 x i32> %a, <4 x i32> %b) {
+; X32-LABEL: insertps_from_shufflevector_i32_2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[3],xmm0[2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_shufflevector_i32_2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[3],xmm0[2,3]
+; X64-NEXT: retq
entry:
%vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
ret <4 x i32> %vecinit6
-; CHECK-LABEL: insertps_from_shufflevector_i32_2:
-; CHECK-NOT: shufps
-; CHECK-NOT: movaps
-; CHECK: insertps $208,
-; CHECK: ret
}
define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b) {
-; CHECK-LABEL: insertps_from_load_ins_elt_undef:
-; CHECK-NOT: movss
-; CHECK-NOT: shufps
-; CHECK: insertps $16,
-; CHECK: ret
+; X32-LABEL: insertps_from_load_ins_elt_undef:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: insertps $16, (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_load_ins_elt_undef:
+; X64: ## BB#0:
+; X64-NEXT: insertps $16, (%rdi), %xmm0
+; X64-NEXT: retq
%1 = load float* %b, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
ret <4 x float> %result
}
-define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
-; CHECK-LABEL: insertps_from_load_ins_elt_undef_i32:
; TODO: Like on pinsrd_from_shufflevector_i32, remove this mov instr
-;; aCHECK-NOT: movd
-; CHECK-NOT: shufps
-; CHECK: insertps $32,
-; CHECK: ret
+define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
+; X32-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movd (%eax), %xmm1
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_load_ins_elt_undef_i32:
+; X64: ## BB#0:
+; X64-NEXT: movd (%rdi), %xmm1
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; X64-NEXT: retq
%1 = load i32* %b, align 4
%2 = insertelement <4 x i32> undef, i32 %1, i32 0
%result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
@@ -323,11 +423,15 @@ define <4 x i32> @insertps_from_load_ins
;;;;;; Shuffles optimizable with a single insertps instruction
define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_XYZ0:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $8
-; CHECK: ret
+; X32-LABEL: shuf_XYZ0:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_XYZ0:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],zero
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecext1 = extractelement <4 x float> %x, i32 1
@@ -339,11 +443,15 @@ define <4 x float> @shuf_XYZ0(<4 x float
}
define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_XY00:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $12
-; CHECK: ret
+; X32-LABEL: shuf_XY00:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,zero
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_XY00:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,zero
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecext1 = extractelement <4 x float> %x, i32 1
@@ -354,11 +462,15 @@ define <4 x float> @shuf_XY00(<4 x float
}
define <4 x float> @shuf_XYY0(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_XYY0:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $104
-; CHECK: ret
+; X32-LABEL: shuf_XYY0:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_XYY0:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecext1 = extractelement <4 x float> %x, i32 1
@@ -369,9 +481,15 @@ define <4 x float> @shuf_XYY0(<4 x float
}
define <4 x float> @shuf_XYW0(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_XYW0:
-; CHECK: insertps $232
-; CHECK: ret
+; X32-LABEL: shuf_XYW0:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_XYW0:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecext1 = extractelement <4 x float> %x, i32 1
@@ -383,11 +501,15 @@ define <4 x float> @shuf_XYW0(<4 x float
}
define <4 x float> @shuf_W00W(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_W00W:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $198
-; CHECK: ret
+; X32-LABEL: shuf_W00W:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_W00W:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 3
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecinit2 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -397,11 +519,21 @@ define <4 x float> @shuf_W00W(<4 x float
}
define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_X00A:
-; CHECK-NOT: movaps
-; CHECK-NOT: shufps
-; CHECK: insertps $48
-; CHECK: ret
+; X32-LABEL: shuf_X00A:
+; X32: ## BB#0:
+; X32-NEXT: xorps %xmm2, %xmm2
+; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
+; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_X00A:
+; X64: ## BB#0:
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
+; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -411,11 +543,21 @@ define <4 x float> @shuf_X00A(<4 x float
}
define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_X00X:
-; CHECK-NOT: movaps
-; CHECK-NOT: shufps
-; CHECK: insertps $48
-; CHECK: ret
+; X32-LABEL: shuf_X00X:
+; X32: ## BB#0:
+; X32-NEXT: xorps %xmm1, %xmm1
+; X32-NEXT: movss %xmm0, %xmm1
+; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
+; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_X00X:
+; X64: ## BB#0:
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: movss %xmm0, %xmm1
+; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
+; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -425,12 +567,23 @@ define <4 x float> @shuf_X00X(<4 x float
}
define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
-; CHECK-LABEL: shuf_X0YC:
-; CHECK: shufps
-; CHECK-NOT: movhlps
-; CHECK-NOT: shufps
-; CHECK: insertps $176
-; CHECK: ret
+; X32-LABEL: shuf_X0YC:
+; X32: ## BB#0:
+; X32-NEXT: xorps %xmm2, %xmm2
+; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
+; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
+; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: shuf_X0YC:
+; X64: ## BB#0:
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
+; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
+; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
@@ -440,11 +593,15 @@ define <4 x float> @shuf_X0YC(<4 x float
}
define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_XYZ0:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $8
-; CHECK: ret
+; X32-LABEL: i32_shuf_XYZ0:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_XYZ0:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],zero
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecext1 = extractelement <4 x i32> %x, i32 1
@@ -456,11 +613,15 @@ define <4 x i32> @i32_shuf_XYZ0(<4 x i32
}
define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_XY00:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $12
-; CHECK: ret
+; X32-LABEL: i32_shuf_XY00:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,zero
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_XY00:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,zero
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecext1 = extractelement <4 x i32> %x, i32 1
@@ -471,11 +632,15 @@ define <4 x i32> @i32_shuf_XY00(<4 x i32
}
define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_XYY0:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $104
-; CHECK: ret
+; X32-LABEL: i32_shuf_XYY0:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_XYY0:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecext1 = extractelement <4 x i32> %x, i32 1
@@ -486,11 +651,15 @@ define <4 x i32> @i32_shuf_XYY0(<4 x i32
}
define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_XYW0:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $232
-; CHECK: ret
+; X32-LABEL: i32_shuf_XYW0:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_XYW0:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecext1 = extractelement <4 x i32> %x, i32 1
@@ -502,11 +671,15 @@ define <4 x i32> @i32_shuf_XYW0(<4 x i32
}
define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_W00W:
-; CHECK-NOT: pextrd
-; CHECK-NOT: punpckldq
-; CHECK: insertps $198
-; CHECK: ret
+; X32-LABEL: i32_shuf_W00W:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_W00W:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 3
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecinit2 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -516,11 +689,21 @@ define <4 x i32> @i32_shuf_W00W(<4 x i32
}
define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_X00A:
-; CHECK-NOT: movaps
-; CHECK-NOT: shufps
-; CHECK: insertps $48
-; CHECK: ret
+; X32-LABEL: i32_shuf_X00A:
+; X32: ## BB#0:
+; X32-NEXT: xorps %xmm2, %xmm2
+; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
+; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_X00A:
+; X64: ## BB#0:
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
+; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -530,11 +713,21 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32
}
define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_X00X:
-; CHECK-NOT: movaps
-; CHECK-NOT: shufps
-; CHECK: insertps $48
-; CHECK: ret
+; X32-LABEL: i32_shuf_X00X:
+; X32: ## BB#0:
+; X32-NEXT: xorps %xmm1, %xmm1
+; X32-NEXT: movss %xmm0, %xmm1
+; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
+; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_X00X:
+; X64: ## BB#0:
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: movss %xmm0, %xmm1
+; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
+; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -544,12 +737,23 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32
}
define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
-; CHECK-LABEL: i32_shuf_X0YC:
-; CHECK: shufps
-; CHECK-NOT: movhlps
-; CHECK-NOT: shufps
-; CHECK: insertps $176
-; CHECK: ret
+; X32-LABEL: i32_shuf_X0YC:
+; X32: ## BB#0:
+; X32-NEXT: xorps %xmm2, %xmm2
+; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
+; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
+; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: i32_shuf_X0YC:
+; X64: ## BB#0:
+; X64-NEXT: xorps %xmm2, %xmm2
+; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
+; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
+; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
%vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
@@ -560,11 +764,19 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32
;; Test for a bug in the first implementation of LowerBuildVectorv4x32
define < 4 x float> @test_insertps_no_undef(<4 x float> %x) {
-; CHECK-LABEL: test_insertps_no_undef:
-; CHECK: movaps %xmm0, %xmm1
-; CHECK-NEXT: insertps $8, %xmm1, %xmm1
-; CHECK-NEXT: maxps %xmm1, %xmm0
-; CHECK-NEXT: ret
+; X32-LABEL: test_insertps_no_undef:
+; X32: ## BB#0:
+; X32-NEXT: movaps %xmm0, %xmm1
+; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],zero
+; X32-NEXT: maxps %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_insertps_no_undef:
+; X64: ## BB#0:
+; X64-NEXT: movaps %xmm0, %xmm1
+; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],zero
+; X64-NEXT: maxps %xmm1, %xmm0
+; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
%vecext1 = extractelement <4 x float> %x, i32 1
@@ -578,48 +790,75 @@ define < 4 x float> @test_insertps_no_un
}
define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
-; CHECK-LABEL: blendvb_fallback
-; CHECK: blendvb
-; CHECK: ret
+; X32-LABEL: blendvb_fallback:
+; X32: ## BB#0:
+; X32-NEXT: psllw $15, %xmm0
+; X32-NEXT: psraw $15, %xmm0
+; X32-NEXT: pblendvb %xmm1, %xmm2
+; X32-NEXT: movdqa %xmm2, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: blendvb_fallback:
+; X64: ## BB#0:
+; X64-NEXT: psllw $15, %xmm0
+; X64-NEXT: psraw $15, %xmm0
+; X64-NEXT: pblendvb %xmm1, %xmm2
+; X64-NEXT: movdqa %xmm2, %xmm0
+; X64-NEXT: retq
%ret = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %y
ret <8 x i16> %ret
}
-define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
-; CHECK-LABEL: insertps_from_vector_load:
; On X32, account for the argument's move to registers
-; X32: movl 4(%esp), %eax
-; CHECK-NOT: mov
-; CHECK: insertps $48
-; CHECK-NEXT: ret
+define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+; X32-LABEL: insertps_from_vector_load:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: insertps $48, (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_vector_load:
+; X64: ## BB#0:
+; X64-NEXT: insertps $48, (%rdi), %xmm0
+; X64-NEXT: retq
%1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
ret <4 x float> %2
}
;; Use a non-zero CountS for insertps
-define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
-; CHECK-LABEL: insertps_from_vector_load_offset:
-; On X32, account for the argument's move to registers
-; X32: movl 4(%esp), %eax
-; CHECK-NOT: mov
;; Try to match a bit more of the instr, since we need the load's offset.
-; CHECK: insertps $96, 4(%{{...}}), %
-; CHECK-NEXT: ret
+define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
+; X32-LABEL: insertps_from_vector_load_offset:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: insertps $96, 4(%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_vector_load_offset:
+; X64: ## BB#0:
+; X64-NEXT: insertps $96, 4(%rdi), %xmm0
+; X64-NEXT: retq
%1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
ret <4 x float> %2
}
-define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
-; CHECK-LABEL: insertps_from_vector_load_offset_2:
-; On X32, account for the argument's move to registers
-; X32: movl 4(%esp), %eax
-; X32: movl 8(%esp), %ecx
-; CHECK-NOT: mov
;; Try to match a bit more of the instr, since we need the load's offset.
-; CHECK: insertps $-64, 12(%{{...}},%{{...}}), %
-; CHECK-NEXT: ret
+define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
+; X32-LABEL: insertps_from_vector_load_offset_2:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: shll $4, %ecx
+; X32-NEXT: insertps $-64, 12(%eax,%ecx), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_vector_load_offset_2:
+; X64: ## BB#0:
+; X64-NEXT: shlq $4, %rsi
+; X64-NEXT: insertps $-64, 12(%rdi,%rsi), %xmm0
+; X64-NEXT: retq
%1 = getelementptr inbounds <4 x float>* %pb, i64 %index
%2 = load <4 x float>* %1, align 16
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
@@ -627,13 +866,17 @@ define <4 x float> @insertps_from_vector
}
define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
-; CHECK-LABEL: insertps_from_broadcast_loadf32:
-; On X32, account for the arguments' move to registers
-; X32: movl 8(%esp), %eax
-; X32: movl 4(%esp), %ecx
-; CHECK-NOT: mov
-; CHECK: insertps $48
-; CHECK-NEXT: ret
+; X32-LABEL: insertps_from_broadcast_loadf32:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: insertps $48, (%ecx,%eax,4), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_broadcast_loadf32:
+; X64: ## BB#0:
+; X64-NEXT: insertps $48, (%rdi,%rsi,4), %xmm0
+; X64-NEXT: retq
%1 = getelementptr inbounds float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
@@ -645,12 +888,16 @@ define <4 x float> @insertps_from_broadc
}
define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
-; CHECK-LABEL: insertps_from_broadcast_loadv4f32:
-; On X32, account for the arguments' move to registers
-; X32: movl 4(%esp), %{{...}}
-; CHECK-NOT: mov
-; CHECK: insertps $48
-; CHECK-NEXT: ret
+; X32-LABEL: insertps_from_broadcast_loadv4f32:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: insertps $48, (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_broadcast_loadv4f32:
+; X64: ## BB#0:
+; X64-NEXT: insertps $48, (%rdi), %xmm0
+; X64-NEXT: retq
%1 = load <4 x float>* %b, align 4
%2 = extractelement <4 x float> %1, i32 0
%3 = insertelement <4 x float> undef, float %2, i32 0
@@ -663,20 +910,33 @@ define <4 x float> @insertps_from_broadc
;; FIXME: We're emitting an extraneous pshufd/vbroadcast.
define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
-; CHECK-LABEL: insertps_from_broadcast_multiple_use:
-; On X32, account for the arguments' move to registers
-; X32: movl 8(%esp), %eax
-; X32: movl 4(%esp), %ecx
-; CHECK: movss
-; CHECK-NOT: mov
-; CHECK: insertps $48
-; CHECK: insertps $48
-; CHECK: insertps $48
-; CHECK: insertps $48
-; CHECK: addps
-; CHECK: addps
-; CHECK: addps
-; CHECK-NEXT: ret
+; X32-LABEL: insertps_from_broadcast_multiple_use:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movss (%ecx,%eax,4), %xmm4
+; X32-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
+; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
+; X32-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; X32-NEXT: addps %xmm1, %xmm0
+; X32-NEXT: addps %xmm2, %xmm3
+; X32-NEXT: addps %xmm3, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_from_broadcast_multiple_use:
+; X64: ## BB#0:
+; X64-NEXT: movss (%rdi,%rsi,4), %xmm4
+; X64-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
+; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
+; X64-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; X64-NEXT: addps %xmm1, %xmm0
+; X64-NEXT: addps %xmm2, %xmm3
+; X64-NEXT: addps %xmm3, %xmm0
+; X64-NEXT: retq
%1 = getelementptr inbounds float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
@@ -694,10 +954,20 @@ define <4 x float> @insertps_from_broadc
}
define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
-; CHECK-LABEL: insertps_with_undefs:
-; CHECK-NOT: shufps
-; CHECK: insertps $32, %xmm0
-; CHECK: ret
+; X32-LABEL: insertps_with_undefs:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movss (%eax), %xmm1
+; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
+; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_with_undefs:
+; X64: ## BB#0:
+; X64-NEXT: movss (%rdi), %xmm1
+; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
+; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: retq
%1 = load float* %b, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
@@ -707,9 +977,16 @@ define <4 x float> @insertps_with_undefs
; Test for a bug in X86ISelLowering.cpp:getINSERTPS where we were using
; the destination index to change the load, instead of the source index.
define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
-; CHECK-LABEL: pr20087:
-; CHECK: insertps $48
-; CHECK: ret
+; X32-LABEL: pr20087:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: insertps $48, 8(%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: pr20087:
+; X64: ## BB#0:
+; X64-NEXT: insertps $48, 8(%rdi), %xmm0
+; X64-NEXT: retq
%load = load <4 x float> *%ptr
%ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
ret <4 x float> %ret
@@ -717,17 +994,26 @@ define <4 x float> @pr20087(<4 x float>
; Edge case for insertps where we end up with a shuffle with mask=<0, 7, -1, -1>
define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
-; CHECK-LABEL: insertps_pr20411:
-; CHECK: movaps {{[^,]*}}, %[[REG1:xmm.]]
-; CHECK: pshufd {{.*}} ## [[REG2:xmm.]] = mem[3,0,0,0]
-; CHECK: insertps {{.*}} ## xmm1 = [[REG2]][0],[[REG1]][3]{{.*}}
-
+; X32-LABEL: insertps_pr20411:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movaps {{.*#+}} xmm0 = [4,5,6,7]
+; X32-NEXT: pshufd {{.*#+}} xmm1 = mem[3,0,0,0]
+; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[3],xmm1[2,3]
+; X32-NEXT: movups %xmm1, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_pr20411:
+; X64: ## BB#0:
+; X64-NEXT: movaps {{.*#+}} xmm0 = [4,5,6,7]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = mem[3,0,0,0]
+; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[3],xmm1[2,3]
+; X64-NEXT: movups %xmm1, (%rdi)
+; X64-NEXT: retq
%gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%shuffle109 = shufflevector <4 x i32> <i32 4, i32 5, i32 6, i32 7>, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; 4 5 6 7
-
%shuffle116 = shufflevector <8 x i32> %gather_load, <8 x i32> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef> ; 3 x x x
%shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 4, i32 3, i32 undef, i32 undef> ; 3 7 x x
-
%ptrcast = bitcast i32* %RET to <4 x i32>*
store <4 x i32> %shuffle117, <4 x i32>* %ptrcast, align 4
ret void
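
A note for readers comparing the removed and added assertions above: the raw immediates in the old checks (e.g. insertps $48, $176, $198) and the element maps in the generated ones (e.g. xmm0 = xmm0[0,1,2],xmm1[0]) describe the same operation; the generated comments simply decode the imm8. The standalone C++ sketch below reproduces that decoding for the register-to-register form. It is illustrative only, assumes the standard SSE4.1 INSERTPS encoding (bits 7:6 = COUNT_S, bits 5:4 = COUNT_D, bits 3:0 = zero mask), and is not code from this patch or from LLVM.

// Illustrative sketch only (not part of this patch or of LLVM): decode an
// INSERTPS imm8 into the element map that the regenerated assertions print.
#include <cstdint>
#include <iostream>
#include <string>

static std::string decodeInsertPS(uint8_t Imm) {
  unsigned CountS = (Imm >> 6) & 0x3; // which source element is inserted
  unsigned CountD = (Imm >> 4) & 0x3; // which destination lane receives it
  unsigned ZMask = Imm & 0xF;         // lanes forced to zero afterwards

  std::string Map = "dst = ";
  for (unsigned Lane = 0; Lane != 4; ++Lane) {
    if (Lane)
      Map += ',';
    if (ZMask & (1u << Lane))
      Map += "zero";
    else if (Lane == CountD)
      Map += "src[" + std::to_string(CountS) + "]";
    else
      Map += "dst[" + std::to_string(Lane) + "]";
  }
  return Map;
}

int main() {
  std::cout << decodeInsertPS(0x30) << "\n"; // $48  -> dst[0],dst[1],dst[2],src[0]
  std::cout << decodeInsertPS(0xB0) << "\n"; // $176 -> dst[0],dst[1],dst[2],src[2]
  std::cout << decodeInsertPS(0xC6) << "\n"; // $198 -> src[3],zero,zero,dst[3]
  return 0;
}

For example, $48 decodes to dst = dst[0],dst[1],dst[2],src[0], which is the mapping shown as xmm0 = xmm0[0,1,2],xmm1[0] in the checks above; memory-operand forms such as insertps $48, (%rdi), %xmm0 keep the raw immediate in the generated output instead.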