[llvm] r283569 - [X86][SSE] Reapplied: Add vector fcopysign combine tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Fri Oct 7 09:00:59 PDT 2016


Author: rksimon
Date: Fri Oct  7 11:00:59 2016
New Revision: 283569

URL: http://llvm.org/viewvc/llvm-project?rev=283569&view=rev
Log:
[X86][SSE] Reapplied: Add vector fcopysign combine tests

Now with better lowering and a fix for PR30443
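
For context, the improved lowering selects vector copysign as whole-vector
bitwise ops, (x & ~signbit) | (y & signbit), instead of scalarizing each lane;
the updated CHECK lines below look for exactly this andps/andps/orps shape.
A minimal IR sketch of the equivalent pattern (the function name and explicit
mask constants here are illustrative, not part of the patch):

  ; copysign(x, y) == (x & ~signbit) | (y & signbit), applied per 32-bit lane
  define <4 x float> @copysign_bitops(<4 x float> %x, <4 x float> %y) {
    %xb  = bitcast <4 x float> %x to <4 x i32>
    %yb  = bitcast <4 x float> %y to <4 x i32>
    ; keep the magnitude bits of x (mask 0x7FFFFFFF in each lane)
    %mag = and <4 x i32> %xb, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
    ; keep only the sign bit of y (mask 0x80000000 in each lane)
    %sgn = and <4 x i32> %yb, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
    %r   = or <4 x i32> %mag, %sgn
    %f   = bitcast <4 x i32> %r to <4 x float>
    ret <4 x float> %f
  }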

Added:
    llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll
      - copied, changed from r281904, llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll

Copied: llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll (from r281904, llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll?p2=llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll&p1=llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll&r1=281904&r2=283569&rev=283569&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-fcopysign.ll Fri Oct  7 11:00:59 2016
@@ -1,52 +1,29 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
 
+;
+; NOTE: this is generated by utils/update_llc_test_checks.py but we can't check NAN types (PR30443),
+; so we need to edit it to remove the NAN constant comments
+;
+
 ; copysign(x, c1) -> fabs(x) iff ispos(c1)
 define <4 x float> @combine_vec_fcopysign_pos_constant0(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_pos_constant0:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; SSE-NEXT:    andps {{.*}}(%rip), %xmm4
-; SSE-NEXT:    orps %xmm4, %xmm2
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm4, %xmm1
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    orps %xmm4, %xmm2
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm4, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [2.000000e+00,2.000000e+00,2.000000e+00,2.000000e+00]
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_pos_constant0:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vandps {{.*}}(%rip), %xmm3, %xmm3
-; AVX-NEXT:    vorps %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm4
-; AVX-NEXT:    vorps %xmm3, %xmm4, %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm2, %xmm4, %xmm4
-; AVX-NEXT:    vorpd %xmm3, %xmm4, %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vorps %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>)
   ret <4 x float> %1
@@ -55,61 +32,19 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_pos_constant1(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_pos_constant1:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-NEXT:    movaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    andps %xmm2, %xmm1
-; SSE-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    andps %xmm4, %xmm3
-; SSE-NEXT:    orps %xmm1, %xmm3
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm4, %xmm1
-; SSE-NEXT:    xorps %xmm5, %xmm5
-; SSE-NEXT:    andps %xmm2, %xmm5
-; SSE-NEXT:    orps %xmm5, %xmm1
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2,3]
-; SSE-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; SSE-NEXT:    andps %xmm2, %xmm3
-; SSE-NEXT:    movaps %xmm0, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm5[1,1]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    orps %xmm3, %xmm5
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
-; SSE-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; SSE-NEXT:    andps %xmm2, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm4, %xmm0
-; SSE-NEXT:    orps %xmm3, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [0.000000e+00,2.000000e+00,4.000000e+00,8.000000e+00]
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_pos_constant1:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm4, %xmm3, %xmm3
-; AVX-NEXT:    vorps %xmm1, %xmm3, %xmm1
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm3
-; AVX-NEXT:    vxorps %xmm5, %xmm5, %xmm5
-; AVX-NEXT:    vandps %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vorps %xmm5, %xmm3, %xmm3
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vandps %xmm2, %xmm3, %xmm3
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm5, %xmm5
-; AVX-NEXT:    vorpd %xmm3, %xmm5, %xmm3
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vandps %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm0
-; AVX-NEXT:    vorps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float 0.0, float 2.0, float 4.0, float 8.0>)
   ret <4 x float> %1
@@ -135,47 +70,20 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_neg_constant0(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_neg_constant0:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; SSE-NEXT:    andps {{.*}}(%rip), %xmm4
-; SSE-NEXT:    orps %xmm4, %xmm2
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm4, %xmm1
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    orps %xmm4, %xmm2
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm4, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [-2.000000e+00,-2.000000e+00,-2.000000e+00,-2.000000e+00]
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_neg_constant0:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vandps {{.*}}(%rip), %xmm3, %xmm3
-; AVX-NEXT:    vorps %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm4
-; AVX-NEXT:    vorps %xmm3, %xmm4, %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm2, %xmm4, %xmm4
-; AVX-NEXT:    vorpd %xmm3, %xmm4, %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vorps %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float -2.0, float -2.0, float -2.0, float -2.0>)
   ret <4 x float> %1
@@ -184,61 +92,19 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_neg_constant1(<4 x float> %x) {
 ; SSE-LABEL: combine_vec_fcopysign_neg_constant1:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-NEXT:    movaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    andps %xmm2, %xmm1
-; SSE-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    andps %xmm4, %xmm3
-; SSE-NEXT:    orps %xmm1, %xmm3
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm4, %xmm1
-; SSE-NEXT:    movss {{.*#+}} xmm5 = mem[0],zero,zero,zero
-; SSE-NEXT:    andps %xmm2, %xmm5
-; SSE-NEXT:    orps %xmm5, %xmm1
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2,3]
-; SSE-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; SSE-NEXT:    andps %xmm2, %xmm3
-; SSE-NEXT:    movaps %xmm0, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm5[1,1]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    orps %xmm3, %xmm5
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
-; SSE-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; SSE-NEXT:    andps %xmm2, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm4, %xmm0
-; SSE-NEXT:    orps %xmm3, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [-0.000000e+00,-2.000000e+00,-4.000000e+00,-8.000000e+00]
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_neg_constant1:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm4, %xmm3, %xmm3
-; AVX-NEXT:    vorps %xmm1, %xmm3, %xmm1
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm3
-; AVX-NEXT:    vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
-; AVX-NEXT:    vandps %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vorps %xmm5, %xmm3, %xmm3
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vandps %xmm2, %xmm3, %xmm3
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm5, %xmm5
-; AVX-NEXT:    vorpd %xmm3, %xmm5, %xmm3
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vandps %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm0
-; AVX-NEXT:    vorps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float -0.0, float -2.0, float -4.0, float -8.0>)
   ret <4 x float> %1
@@ -247,64 +113,21 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_fneg_fabs_sgn(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
 ; SSE:       # BB#0:
-; SSE-NEXT:    orps {{.*}}(%rip), %xmm1
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    andps %xmm4, %xmm2
-; SSE-NEXT:    movaps {{.*#+}} xmm3 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    andps %xmm3, %xmm5
-; SSE-NEXT:    orps %xmm5, %xmm2
-; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
-; SSE-NEXT:    movaps %xmm0, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm5[1,1]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps %xmm1, %xmm6
-; SSE-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm4, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT:    andps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm0, %xmm1
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
-; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
+; SSE-NEXT:    orps %xmm2, %xmm1
+; SSE-NEXT:    andps %xmm2, %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vorps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm3
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm4, %xmm1, %xmm5
-; AVX-NEXT:    vorps %xmm5, %xmm3, %xmm3
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; AVX-NEXT:    vandps %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
-; AVX-NEXT:    vandps %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorps %xmm6, %xmm5, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorpd %xmm6, %xmm5, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX-NEXT:    vandps %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm3
+; AVX-NEXT:    vandps %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %y)
   %2 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %1
@@ -316,61 +139,18 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_fabs_mag(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fabs_mag:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    orps %xmm5, %xmm2
-; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm5[1,1]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps %xmm0, %xmm6
-; SSE-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT:    andps %xmm4, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm3, %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm1, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fabs_mag:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm3
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm5
-; AVX-NEXT:    vorps %xmm3, %xmm5, %xmm3
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; AVX-NEXT:    vandps %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; AVX-NEXT:    vandps %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorps %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
-; AVX-NEXT:    vandpd %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorpd %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
   %2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %1, <4 x float> %y)
@@ -381,61 +161,18 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_fneg_mag(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fneg_mag:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    orps %xmm5, %xmm2
-; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm5[1,1]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps %xmm0, %xmm6
-; SSE-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT:    andps %xmm4, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm3, %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm1, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fneg_mag:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm3
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm5
-; AVX-NEXT:    vorps %xmm3, %xmm5, %xmm3
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; AVX-NEXT:    vandps %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; AVX-NEXT:    vandps %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorps %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
-; AVX-NEXT:    vandpd %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorpd %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; AVX-NEXT:    retq
   %1 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %x
   %2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %1, <4 x float> %y)
@@ -446,61 +183,18 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
 ; SSE-LABEL: combine_vec_fcopysign_fcopysign_mag:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    orps %xmm5, %xmm2
-; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
-; SSE-NEXT:    movaps %xmm1, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm5[1,1]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps %xmm0, %xmm6
-; SSE-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT:    andps %xmm4, %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm3, %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm1, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fcopysign_mag:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm3
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm5
-; AVX-NEXT:    vorps %xmm3, %xmm5, %xmm3
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; AVX-NEXT:    vandps %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; AVX-NEXT:    vandps %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorps %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
-; AVX-NEXT:    vandpd %xmm2, %xmm5, %xmm5
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorpd %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %z)
   %2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %1, <4 x float> %y)
@@ -511,61 +205,18 @@ define <4 x float> @combine_vec_fcopysig
 define <4 x float> @combine_vec_fcopysign_fcopysign_sgn(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
 ; SSE-LABEL: combine_vec_fcopysign_fcopysign_sgn:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm2, %xmm5
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm5, %xmm1
-; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[2,3]
-; SSE-NEXT:    movaps %xmm2, %xmm5
-; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm5[1,1]
-; SSE-NEXT:    andps %xmm4, %xmm5
-; SSE-NEXT:    movaps %xmm0, %xmm6
-; SSE-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
-; SSE-NEXT:    andps %xmm3, %xmm6
-; SSE-NEXT:    orps %xmm5, %xmm6
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; SSE-NEXT:    andps %xmm4, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    andps %xmm3, %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm2
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    orps %xmm2, %xmm0
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fcopysign_sgn:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm3
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm5
-; AVX-NEXT:    vorps %xmm3, %xmm5, %xmm3
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
-; AVX-NEXT:    vandps %xmm1, %xmm5, %xmm5
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; AVX-NEXT:    vandps %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorps %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm2[1,0]
-; AVX-NEXT:    vandpd %xmm1, %xmm5, %xmm5
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm6, %xmm6
-; AVX-NEXT:    vorpd %xmm5, %xmm6, %xmm5
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
 ; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %y, <4 x float> %z)
   %2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %1)
@@ -582,10 +233,10 @@ define <4 x double> @combine_vec_fcopysi
 ; SSE-NEXT:    movaps %xmm2, %xmm6
 ; SSE-NEXT:    movhlps {{.*#+}} xmm6 = xmm6[1,1]
 ; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; SSE-NEXT:    movaps {{.*#+}} xmm7 = [nan,0.000000e+00]
+; SSE-NEXT:    movaps {{.*#+}} xmm7
 ; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    andps %xmm7, %xmm2
-; SSE-NEXT:    movaps {{.*#+}} xmm8 = [-0.000000e+00,0.000000e+00]
+; SSE-NEXT:    movaps {{.*#+}} xmm8 = [-0.000000e+00,-0.000000e+00]
 ; SSE-NEXT:    andps %xmm8, %xmm4
 ; SSE-NEXT:    orps %xmm4, %xmm2
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -612,33 +263,12 @@ define <4 x double> @combine_vec_fcopysi
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fpext_sgn:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX-NEXT:    vmovapd {{.*#+}} xmm4 = [nan,0.000000e+00]
-; AVX-NEXT:    vandpd %xmm4, %xmm3, %xmm3
-; AVX-NEXT:    vpermilps {{.*#+}} xmm5 = xmm1[3,1,2,3]
-; AVX-NEXT:    vcvtss2sd %xmm5, %xmm5, %xmm5
-; AVX-NEXT:    vmovapd {{.*#+}} xmm6 = [-0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandpd %xmm6, %xmm5, %xmm5
-; AVX-NEXT:    vorpd %xmm5, %xmm3, %xmm3
-; AVX-NEXT:    vandpd %xmm4, %xmm2, %xmm2
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
-; AVX-NEXT:    vcvtss2sd %xmm5, %xmm5, %xmm5
-; AVX-NEXT:    vandpd %xmm6, %xmm5, %xmm5
-; AVX-NEXT:    vorpd %xmm5, %xmm2, %xmm2
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX-NEXT:    vandpd %xmm4, %xmm0, %xmm3
-; AVX-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm5
-; AVX-NEXT:    vandpd %xmm6, %xmm5, %xmm5
-; AVX-NEXT:    vorpd %xmm5, %xmm3, %xmm3
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm4, %xmm0, %xmm0
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vandpd %xmm6, %xmm1, %xmm1
-; AVX-NEXT:    vorpd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm2
+; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; AVX-NEXT:    vcvtps2pd %xmm1, %ymm1
+; AVX-NEXT:    vbroadcastsd {{.*}}(%rip), %ymm2
+; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %1 = fpext <4 x float> %y to <4 x double>
   %2 = call <4 x double> @llvm.copysign.v4f64(<4 x double> %x, <4 x double> %1)
@@ -650,10 +280,10 @@ define <4 x float> @combine_vec_fcopysig
 ; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    movaps %xmm0, %xmm3
-; SSE-NEXT:    movaps {{.*#+}} xmm5 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
+; SSE-NEXT:    movaps {{.*#+}} xmm5
 ; SSE-NEXT:    andps %xmm5, %xmm0
 ; SSE-NEXT:    cvtsd2ss %xmm1, %xmm6
-; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
 ; SSE-NEXT:    andps %xmm4, %xmm6
 ; SSE-NEXT:    orps %xmm6, %xmm0
 ; SSE-NEXT:    movshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
@@ -683,33 +313,12 @@ define <4 x float> @combine_vec_fcopysig
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm3
-; AVX-NEXT:    vcvtsd2ss %xmm1, %xmm1, %xmm4
-; AVX-NEXT:    vmovaps {{.*#+}} xmm5 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
-; AVX-NEXT:    vandps %xmm5, %xmm4, %xmm4
-; AVX-NEXT:    vorps %xmm4, %xmm3, %xmm3
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX-NEXT:    vandps %xmm2, %xmm4, %xmm4
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
-; AVX-NEXT:    vcvtsd2ss %xmm6, %xmm6, %xmm6
-; AVX-NEXT:    vandps %xmm5, %xmm6, %xmm6
-; AVX-NEXT:    vorps %xmm6, %xmm4, %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
-; AVX-NEXT:    vandpd %xmm2, %xmm4, %xmm4
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vcvtsd2ss %xmm1, %xmm1, %xmm6
-; AVX-NEXT:    vandps %xmm5, %xmm6, %xmm6
-; AVX-NEXT:    vorpd %xmm6, %xmm4, %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX-NEXT:    vcvtsd2ss %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vandps %xmm5, %xmm1, %xmm1
-; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandpd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vcvtpd2psy %ymm1, %xmm1
+; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vandpd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vorpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
   %1 = fptrunc <4 x double> %y to <4 x float>
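
As the NOTE in the test explains, these CHECK lines were produced by
utils/update_llc_test_checks.py and then hand-edited to drop the NaN constant
comments (PR30443). A typical regeneration step looks something like the
following, though the llc path is an assumption about the local build and the
exact script flags may differ by revision:

  utils/update_llc_test_checks.py --llc-binary=build/bin/llc test/CodeGen/X86/combine-fcopysign.ll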



