[llvm] r257992 - [X86][SSE] Regenerated HADD/HSUB tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jan 16 06:03:41 PST 2016


Author: rksimon
Date: Sat Jan 16 08:03:40 2016
New Revision: 257992

URL: http://llvm.org/viewvc/llvm-project?rev=257992&view=rev
Log:
[X86][SSE] Regenerated HADD/HSUB tests
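
The new assertions were produced by the update_llc_test_checks.py utility
noted in each file's header. As a minimal sketch, regenerating one of the
modified tests looks like this (the build path is illustrative; the script's
--llc-binary option defaults to whichever 'llc' is on PATH):

    python utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        test/CodeGen/X86/haddsub-2.ll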

Modified:
    llvm/trunk/test/CodeGen/X86/haddsub-2.ll
    llvm/trunk/test/CodeGen/X86/haddsub-undef.ll
    llvm/trunk/test/CodeGen/X86/haddsub.ll
    llvm/trunk/test/CodeGen/X86/phaddsub.ll

Modified: llvm/trunk/test/CodeGen/X86/haddsub-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub-2.ll?rev=257992&r1=257991&r2=257992&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub-2.ll Sat Jan 16 08:03:40 2016
@@ -1,11 +1,19 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE3
-; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3,+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSSE3
-; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
-; RUN: llc < %s -march=x86-64 -mattr=+avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
-
-
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+sse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+sse3,+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 
 define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
+; SSE-LABEL: hadd_ps_test1:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hadd_ps_test1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 0
   %vecext1 = extractelement <4 x float> %A, i32 1
   %add = fadd float %vecext, %vecext1
@@ -24,12 +32,17 @@ define <4 x float> @hadd_ps_test1(<4 x f
   %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
   ret <4 x float> %vecinit13
 }
-; CHECK-LABEL: hadd_ps_test1
-; CHECK: haddps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
+; SSE-LABEL: hadd_ps_test2:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hadd_ps_test2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 2
   %vecext1 = extractelement <4 x float> %A, i32 3
   %add = fadd float %vecext, %vecext1
@@ -48,12 +61,17 @@ define <4 x float> @hadd_ps_test2(<4 x f
   %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 2
   ret <4 x float> %vecinit13
 }
-; CHECK-LABEL: hadd_ps_test2
-; CHECK: haddps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
+; SSE-LABEL: hsub_ps_test1:
+; SSE:       # BB#0:
+; SSE-NEXT:    hsubps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hsub_ps_test1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 0
   %vecext1 = extractelement <4 x float> %A, i32 1
   %sub = fsub float %vecext, %vecext1
@@ -72,12 +90,17 @@ define <4 x float> @hsub_ps_test1(<4 x f
   %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 3
   ret <4 x float> %vecinit13
 }
-; CHECK-LABEL: hsub_ps_test1
-; CHECK: hsubps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
+; SSE-LABEL: hsub_ps_test2:
+; SSE:       # BB#0:
+; SSE-NEXT:    hsubps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hsub_ps_test2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 2
   %vecext1 = extractelement <4 x float> %A, i32 3
   %sub = fsub float %vecext, %vecext1
@@ -96,12 +119,46 @@ define <4 x float> @hsub_ps_test2(<4 x f
   %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
   ret <4 x float> %vecinit13
 }
-; CHECK-LABEL: hsub_ps_test2
-; CHECK: hsubps
-; CHECK-NEXT: ret
-
 
 define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
+; SSE3-LABEL: phadd_d_test1:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm2, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE3-NEXT:    movd %xmm2, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    addl %eax, %edx
+; SSE3-NEXT:    movd %xmm1, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    addl %eax, %esi
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edi
+; SSE3-NEXT:    addl %eax, %edi
+; SSE3-NEXT:    movd %edi, %xmm0
+; SSE3-NEXT:    movd %edx, %xmm1
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT:    movd %esi, %xmm2
+; SSE3-NEXT:    movd %ecx, %xmm0
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: phadd_d_test1:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: phadd_d_test1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 0
   %vecext1 = extractelement <4 x i32> %A, i32 1
   %add = add i32 %vecext, %vecext1
@@ -120,15 +177,46 @@ define <4 x i32> @phadd_d_test1(<4 x i32
   %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 3
   ret <4 x i32> %vecinit13
 }
-; CHECK-LABEL: phadd_d_test1
-; SSE3-NOT: phaddd
-; SSSE3: phaddd
-; AVX: vphaddd
-; AVX2 vphaddd
-; CHECK: ret
-
 
 define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
+; SSE3-LABEL: phadd_d_test2:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE3-NEXT:    movd %xmm2, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSE3-NEXT:    movd %xmm2, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    addl %eax, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    addl %eax, %esi
+; SSE3-NEXT:    movd %esi, %xmm0
+; SSE3-NEXT:    movd %ecx, %xmm2
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    movd %xmm1, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movd %ecx, %xmm1
+; SSE3-NEXT:    movd %edx, %xmm0
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: phadd_d_test2:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: phadd_d_test2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 2
   %vecext1 = extractelement <4 x i32> %A, i32 3
   %add = add i32 %vecext, %vecext1
@@ -147,15 +235,46 @@ define <4 x i32> @phadd_d_test2(<4 x i32
   %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 2
   ret <4 x i32> %vecinit13
 }
-; CHECK-LABEL: phadd_d_test2
-; SSE3-NOT: phaddd
-; SSSE3: phaddd
-; AVX: vphaddd
-; AVX2 vphaddd
-; CHECK: ret
-
 
 define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
+; SSE3-LABEL: phsub_d_test1:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm2, %ecx
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE3-NEXT:    movd %xmm2, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    subl %edx, %ecx
+; SSE3-NEXT:    movd %xmm1, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    subl %esi, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edi
+; SSE3-NEXT:    subl %edi, %esi
+; SSE3-NEXT:    movd %esi, %xmm0
+; SSE3-NEXT:    movd %ecx, %xmm1
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT:    movd %edx, %xmm2
+; SSE3-NEXT:    movd %eax, %xmm0
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: phsub_d_test1:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phsubd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: phsub_d_test1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 0
   %vecext1 = extractelement <4 x i32> %A, i32 1
   %sub = sub i32 %vecext, %vecext1
@@ -174,15 +293,46 @@ define <4 x i32> @phsub_d_test1(<4 x i32
   %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
   ret <4 x i32> %vecinit13
 }
-; CHECK-LABEL: phsub_d_test1
-; SSE3-NOT: phsubd
-; SSSE3: phsubd
-; AVX: vphsubd
-; AVX2 vphsubd
-; CHECK: ret
-
 
 define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
+; SSE3-LABEL: phsub_d_test2:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE3-NEXT:    movd %xmm2, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; SSE3-NEXT:    movd %xmm2, %ecx
+; SSE3-NEXT:    subl %ecx, %eax
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    subl %edx, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    subl %esi, %edx
+; SSE3-NEXT:    movd %edx, %xmm0
+; SSE3-NEXT:    movd %eax, %xmm2
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE3-NEXT:    movd %xmm1, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    subl %edx, %eax
+; SSE3-NEXT:    movd %eax, %xmm1
+; SSE3-NEXT:    movd %ecx, %xmm0
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: phsub_d_test2:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phsubd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: phsub_d_test2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 2
   %vecext1 = extractelement <4 x i32> %A, i32 3
   %sub = sub i32 %vecext, %vecext1
@@ -201,15 +351,17 @@ define <4 x i32> @phsub_d_test2(<4 x i32
   %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 2
   ret <4 x i32> %vecinit13
 }
-; CHECK-LABEL: phsub_d_test2
-; SSE3-NOT: phsubd
-; SSSE3: phsubd
-; AVX: vphsubd
-; AVX2 vphsubd
-; CHECK: ret
-
 
 define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
+; SSE-LABEL: hadd_pd_test1:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hadd_pd_test1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %A, i32 0
   %vecext1 = extractelement <2 x double> %A, i32 1
   %add = fadd double %vecext, %vecext1
@@ -220,12 +372,17 @@ define <2 x double> @hadd_pd_test1(<2 x
   %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
   ret <2 x double> %vecinit2
 }
-; CHECK-LABEL: hadd_pd_test1
-; CHECK: haddpd
-; CHECK-NEXT: ret
-
 
 define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
+; SSE-LABEL: hadd_pd_test2:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hadd_pd_test2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %A, i32 1
   %vecext1 = extractelement <2 x double> %A, i32 0
   %add = fadd double %vecext, %vecext1
@@ -236,12 +393,17 @@ define <2 x double> @hadd_pd_test2(<2 x
   %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
   ret <2 x double> %vecinit2
 }
-; CHECK-LABEL: hadd_pd_test2
-; CHECK: haddpd
-; CHECK-NEXT: ret
-
 
 define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
+; SSE-LABEL: hsub_pd_test1:
+; SSE:       # BB#0:
+; SSE-NEXT:    hsubpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hsub_pd_test1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %A, i32 0
   %vecext1 = extractelement <2 x double> %A, i32 1
   %sub = fsub double %vecext, %vecext1
@@ -252,12 +414,17 @@ define <2 x double> @hsub_pd_test1(<2 x
   %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 1
   ret <2 x double> %vecinit2
 }
-; CHECK-LABEL: hsub_pd_test1
-; CHECK: hsubpd
-; CHECK-NEXT: ret
-
 
 define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
+; SSE-LABEL: hsub_pd_test2:
+; SSE:       # BB#0:
+; SSE-NEXT:    hsubpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: hsub_pd_test2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %B, i32 0
   %vecext1 = extractelement <2 x double> %B, i32 1
   %sub = fsub double %vecext, %vecext1
@@ -268,12 +435,23 @@ define <2 x double> @hsub_pd_test2(<2 x
   %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
   ret <2 x double> %vecinit2
 }
-; CHECK-LABEL: hsub_pd_test2
-; CHECK: hsubpd
-; CHECK-NEXT: ret
-
 
 define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
+; SSE-LABEL: avx_vhadd_pd_test:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddpd %xmm1, %xmm0
+; SSE-NEXT:    haddpd %xmm3, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: avx_vhadd_pd_test:
+; AVX:       # BB#0:
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vhaddpd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x double> %A, i32 0
   %vecext1 = extractelement <4 x double> %A, i32 1
   %add = fadd double %vecext, %vecext1
@@ -292,19 +470,23 @@ define <4 x double> @avx_vhadd_pd_test(<
   %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
   ret <4 x double> %vecinit13
 }
-; CHECK-LABEL: avx_vhadd_pd_test
-; SSE3: haddpd
-; SSE3-NEXT: haddpd
-; SSSE3: haddpd
-; SSSE3: haddpd
-; AVX: vhaddpd
-; AVX: vhaddpd
-; AVX2: vhaddpd
-; AVX2: vhaddpd
-; CHECK: ret
-
 
 define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
+; SSE-LABEL: avx_vhsub_pd_test:
+; SSE:       # BB#0:
+; SSE-NEXT:    hsubpd %xmm1, %xmm0
+; SSE-NEXT:    hsubpd %xmm3, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: avx_vhsub_pd_test:
+; AVX:       # BB#0:
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT:    vhsubpd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vhsubpd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x double> %A, i32 0
   %vecext1 = extractelement <4 x double> %A, i32 1
   %sub = fsub double %vecext, %vecext1
@@ -323,19 +505,86 @@ define <4 x double> @avx_vhsub_pd_test(<
   %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
   ret <4 x double> %vecinit13
 }
-; CHECK-LABEL: avx_vhsub_pd_test
-; SSE3: hsubpd
-; SSE3-NEXT: hsubpd
-; SSSE3: hsubpd
-; SSSE3-NEXT: hsubpd
-; AVX: vhsubpd
-; AVX: vhsubpd
-; AVX2: vhsubpd
-; AVX2: vhsubpd
-; CHECK: ret
-
 
 define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
+; SSE3-LABEL: avx2_vphadd_d_test:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm4, %r8d
+; SSE3-NEXT:    addl %ecx, %r8d
+; SSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE3-NEXT:    movd %xmm4, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %r9d
+; SSE3-NEXT:    addl %edx, %r9d
+; SSE3-NEXT:    movd %xmm1, %esi
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %r10d
+; SSE3-NEXT:    addl %esi, %r10d
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edi
+; SSE3-NEXT:    addl %esi, %edi
+; SSE3-NEXT:    movd %xmm2, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %r11d
+; SSE3-NEXT:    addl %eax, %r11d
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movd %xmm3, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    addl %eax, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    addl %eax, %esi
+; SSE3-NEXT:    movd %edi, %xmm0
+; SSE3-NEXT:    movd %r9d, %xmm1
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT:    movd %r10d, %xmm2
+; SSE3-NEXT:    movd %r8d, %xmm0
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    movd %esi, %xmm1
+; SSE3-NEXT:    movd %ecx, %xmm2
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE3-NEXT:    movd %edx, %xmm3
+; SSE3-NEXT:    movd %r11d, %xmm1
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: avx2_vphadd_d_test:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-NEXT:    phaddd %xmm3, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: avx2_vphadd_d_test:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: avx2_vphadd_d_test:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %A, i32 0
   %vecext1 = extractelement <8 x i32> %A, i32 1
   %add = add i32 %vecext, %vecext1
@@ -370,17 +619,154 @@ define <8 x i32> @avx2_vphadd_d_test(<8
   %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
   ret <8 x i32> %vecinit29
 }
-; CHECK-LABEL: avx2_vphadd_d_test
-; SSE3-NOT: phaddd
-; SSSE3: phaddd
-; SSSE3-NEXT: phaddd
-; AVX: vphaddd
-; AVX: vphaddd
-; AVX2: vphaddd
-; AVX2: vphaddd
-; CHECK: ret
 
 define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
+; SSE3-LABEL: avx2_vphadd_w_test:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    pushq %rbp
+; SSE3-NEXT:  .Ltmp0:
+; SSE3-NEXT:    .cfi_def_cfa_offset 16
+; SSE3-NEXT:    pushq %r15
+; SSE3-NEXT:  .Ltmp1:
+; SSE3-NEXT:    .cfi_def_cfa_offset 24
+; SSE3-NEXT:    pushq %r14
+; SSE3-NEXT:  .Ltmp2:
+; SSE3-NEXT:    .cfi_def_cfa_offset 32
+; SSE3-NEXT:    pushq %r13
+; SSE3-NEXT:  .Ltmp3:
+; SSE3-NEXT:    .cfi_def_cfa_offset 40
+; SSE3-NEXT:    pushq %r12
+; SSE3-NEXT:  .Ltmp4:
+; SSE3-NEXT:    .cfi_def_cfa_offset 48
+; SSE3-NEXT:    pushq %rbx
+; SSE3-NEXT:  .Ltmp5:
+; SSE3-NEXT:    .cfi_def_cfa_offset 56
+; SSE3-NEXT:  .Ltmp6:
+; SSE3-NEXT:    .cfi_offset %rbx, -56
+; SSE3-NEXT:  .Ltmp7:
+; SSE3-NEXT:    .cfi_offset %r12, -48
+; SSE3-NEXT:  .Ltmp8:
+; SSE3-NEXT:    .cfi_offset %r13, -40
+; SSE3-NEXT:  .Ltmp9:
+; SSE3-NEXT:    .cfi_offset %r14, -32
+; SSE3-NEXT:  .Ltmp10:
+; SSE3-NEXT:    .cfi_offset %r15, -24
+; SSE3-NEXT:  .Ltmp11:
+; SSE3-NEXT:    .cfi_offset %rbp, -16
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pextrw $1, %xmm0, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; SSE3-NEXT:    pextrw $2, %xmm0, %eax
+; SSE3-NEXT:    pextrw $3, %xmm0, %r11d
+; SSE3-NEXT:    addl %eax, %r11d
+; SSE3-NEXT:    pextrw $4, %xmm0, %eax
+; SSE3-NEXT:    pextrw $5, %xmm0, %r10d
+; SSE3-NEXT:    addl %eax, %r10d
+; SSE3-NEXT:    pextrw $6, %xmm0, %eax
+; SSE3-NEXT:    pextrw $7, %xmm0, %r13d
+; SSE3-NEXT:    addl %eax, %r13d
+; SSE3-NEXT:    movd %xmm1, %eax
+; SSE3-NEXT:    pextrw $1, %xmm1, %r14d
+; SSE3-NEXT:    addl %eax, %r14d
+; SSE3-NEXT:    pextrw $2, %xmm1, %eax
+; SSE3-NEXT:    pextrw $3, %xmm1, %ebp
+; SSE3-NEXT:    addl %eax, %ebp
+; SSE3-NEXT:    pextrw $4, %xmm1, %eax
+; SSE3-NEXT:    pextrw $5, %xmm1, %ebx
+; SSE3-NEXT:    addl %eax, %ebx
+; SSE3-NEXT:    pextrw $6, %xmm1, %eax
+; SSE3-NEXT:    pextrw $7, %xmm1, %edx
+; SSE3-NEXT:    addl %eax, %edx
+; SSE3-NEXT:    movd %xmm2, %eax
+; SSE3-NEXT:    pextrw $1, %xmm2, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; SSE3-NEXT:    pextrw $2, %xmm2, %eax
+; SSE3-NEXT:    pextrw $3, %xmm2, %r12d
+; SSE3-NEXT:    addl %eax, %r12d
+; SSE3-NEXT:    pextrw $4, %xmm2, %eax
+; SSE3-NEXT:    pextrw $5, %xmm2, %r15d
+; SSE3-NEXT:    addl %eax, %r15d
+; SSE3-NEXT:    pextrw $6, %xmm2, %eax
+; SSE3-NEXT:    pextrw $7, %xmm2, %r8d
+; SSE3-NEXT:    addl %eax, %r8d
+; SSE3-NEXT:    movd %xmm3, %eax
+; SSE3-NEXT:    pextrw $1, %xmm3, %r9d
+; SSE3-NEXT:    addl %eax, %r9d
+; SSE3-NEXT:    pextrw $2, %xmm3, %eax
+; SSE3-NEXT:    pextrw $3, %xmm3, %esi
+; SSE3-NEXT:    addl %eax, %esi
+; SSE3-NEXT:    pextrw $4, %xmm3, %eax
+; SSE3-NEXT:    pextrw $5, %xmm3, %edi
+; SSE3-NEXT:    addl %eax, %edi
+; SSE3-NEXT:    pextrw $6, %xmm3, %ecx
+; SSE3-NEXT:    pextrw $7, %xmm3, %eax
+; SSE3-NEXT:    addl %ecx, %eax
+; SSE3-NEXT:    movd %edx, %xmm8
+; SSE3-NEXT:    movd %r13d, %xmm3
+; SSE3-NEXT:    movd %ebp, %xmm9
+; SSE3-NEXT:    movd %r11d, %xmm4
+; SSE3-NEXT:    movd %ebx, %xmm10
+; SSE3-NEXT:    movd %r10d, %xmm7
+; SSE3-NEXT:    movd %r14d, %xmm11
+; SSE3-NEXT:    movd -{{[0-9]+}}(%rsp), %xmm0 # 4-byte Folded Reload
+; SSE3-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; SSE3-NEXT:    movd %eax, %xmm12
+; SSE3-NEXT:    movd %r8d, %xmm6
+; SSE3-NEXT:    movd %esi, %xmm13
+; SSE3-NEXT:    movd %r12d, %xmm5
+; SSE3-NEXT:    movd %edi, %xmm14
+; SSE3-NEXT:    movd %r15d, %xmm2
+; SSE3-NEXT:    movd %r9d, %xmm15
+; SSE3-NEXT:    movd -{{[0-9]+}}(%rsp), %xmm1 # 4-byte Folded Reload
+; SSE3-NEXT:    # xmm1 = mem[0],zero,zero,zero
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE3-NEXT:    popq %rbx
+; SSE3-NEXT:    popq %r12
+; SSE3-NEXT:    popq %r13
+; SSE3-NEXT:    popq %r14
+; SSE3-NEXT:    popq %r15
+; SSE3-NEXT:    popq %rbp
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: avx2_vphadd_w_test:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddw %xmm1, %xmm0
+; SSSE3-NEXT:    phaddw %xmm3, %xmm2
+; SSSE3-NEXT:    movdqa %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: avx2_vphadd_w_test:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vphaddw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vphaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: avx2_vphadd_w_test:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vphaddw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vphaddw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <16 x i16> %a, i32 0
   %vecext1 = extractelement <16 x i16> %a, i32 1
   %add = add i16 %vecext, %vecext1
@@ -447,20 +833,58 @@ define <16 x i16> @avx2_vphadd_w_test(<1
   %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
   ret <16 x i16> %vecinit108
 }
-; CHECK-LABEL: avx2_vphadd_w_test
-; SSE3-NOT: phaddw
-; SSSE3: phaddw
-; SSSE3-NEXT: phaddw
-; AVX: vphaddw
-; AVX: vphaddw
-; AVX2: vphaddw
-; AVX2: vphaddw
-; CHECK: ret
-
 
 ; Verify that we don't select horizontal subs in the following functions.
 
 define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
+; SSE-LABEL: not_a_hsub_1:
+; SSE:       # BB#0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE-NEXT:    movd %xmm2, %ecx
+; SSE-NEXT:    subl %ecx, %eax
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT:    movd %xmm2, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-NEXT:    movd %xmm0, %edx
+; SSE-NEXT:    subl %edx, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE-NEXT:    movd %xmm0, %edx
+; SSE-NEXT:    movd %xmm1, %esi
+; SSE-NEXT:    subl %esi, %edx
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE-NEXT:    movd %xmm0, %esi
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE-NEXT:    movd %xmm0, %edi
+; SSE-NEXT:    subl %edi, %esi
+; SSE-NEXT:    movd %esi, %xmm0
+; SSE-NEXT:    movd %ecx, %xmm1
+; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT:    movd %edx, %xmm2
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: not_a_hsub_1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    vpextrd $3, %xmm0, %edx
+; AVX-NEXT:    subl %edx, %ecx
+; AVX-NEXT:    vpextrd $1, %xmm1, %edx
+; AVX-NEXT:    vmovd %xmm1, %esi
+; AVX-NEXT:    subl %esi, %edx
+; AVX-NEXT:    vpextrd $3, %xmm1, %esi
+; AVX-NEXT:    vpextrd $2, %xmm1, %edi
+; AVX-NEXT:    subl %edi, %esi
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, %esi, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x i32> %A, i32 0
   %vecext1 = extractelement <4 x i32> %A, i32 1
   %sub = sub i32 %vecext, %vecext1
@@ -479,12 +903,45 @@ define <4 x i32> @not_a_hsub_1(<4 x i32>
   %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
   ret <4 x i32> %vecinit13
 }
-; CHECK-LABEL: not_a_hsub_1
-; CHECK-NOT: phsubd
-; CHECK: ret
-
 
 define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
+; SSE-LABEL: not_a_hsub_2:
+; SSE:       # BB#0:
+; SSE-NEXT:    movapd %xmm0, %xmm2
+; SSE-NEXT:    shufpd {{.*#+}} xmm2 = xmm2[1,0]
+; SSE-NEXT:    movapd %xmm0, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT:    subss %xmm3, %xmm2
+; SSE-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE-NEXT:    subss %xmm3, %xmm0
+; SSE-NEXT:    movaps %xmm1, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    shufpd {{.*#+}} xmm4 = xmm4[1,0]
+; SSE-NEXT:    subss %xmm4, %xmm3
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT:    movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE-NEXT:    subss %xmm3, %xmm1
+; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: not_a_hsub_2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX-NEXT:    vsubss %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX-NEXT:    vsubss %xmm4, %xmm3, %xmm3
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %A, i32 2
   %vecext1 = extractelement <4 x float> %A, i32 3
   %sub = fsub float %vecext, %vecext1
@@ -503,12 +960,28 @@ define <4 x float> @not_a_hsub_2(<4 x fl
   %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
   ret <4 x float> %vecinit13
 }
-; CHECK-LABEL: not_a_hsub_2
-; CHECK-NOT: hsubps
-; CHECK: ret
-
 
 define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
+; SSE-LABEL: not_a_hsub_3:
+; SSE:       # BB#0:
+; SSE-NEXT:    movapd %xmm1, %xmm2
+; SSE-NEXT:    shufpd {{.*#+}} xmm2 = xmm2[1,0]
+; SSE-NEXT:    subsd %xmm2, %xmm1
+; SSE-NEXT:    movapd %xmm0, %xmm2
+; SSE-NEXT:    shufpd {{.*#+}} xmm2 = xmm2[1,0]
+; SSE-NEXT:    subsd %xmm0, %xmm2
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE-NEXT:    movapd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: not_a_hsub_3:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX-NEXT:    vsubsd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT:    vsubsd %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %B, i32 0
   %vecext1 = extractelement <2 x double> %B, i32 1
   %sub = fsub double %vecext, %vecext1
@@ -519,15 +992,21 @@ define <2 x double> @not_a_hsub_3(<2 x d
   %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
   ret <2 x double> %vecinit2
 }
-; CHECK-LABEL: not_a_hsub_3
-; CHECK-NOT: hsubpd
-; CHECK: ret
-
 
 ; Test AVX horizontal add/sub of packed single/double precision
 ; floating point values from 256-bit vectors.
 
 define <8 x float> @avx_vhadd_ps(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: avx_vhadd_ps:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm2, %xmm0
+; SSE-NEXT:    haddps %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: avx_vhadd_ps:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
   %vecext1 = extractelement <8 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -562,17 +1041,18 @@ define <8 x float> @avx_vhadd_ps(<8 x fl
   %vecinit29 = insertelement <8 x float> %vecinit25, float %add28, i32 7
   ret <8 x float> %vecinit29
 }
-; CHECK-LABEL: avx_vhadd_ps
-; SSE3: haddps
-; SSE3-NEXT: haddps
-; SSSE3: haddps
-; SSSE3-NEXT: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK: ret
-
 
 define <8 x float> @avx_vhsub_ps(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: avx_vhsub_ps:
+; SSE:       # BB#0:
+; SSE-NEXT:    hsubps %xmm2, %xmm0
+; SSE-NEXT:    hsubps %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: avx_vhsub_ps:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
   %vecext1 = extractelement <8 x float> %a, i32 1
   %sub = fsub float %vecext, %vecext1
@@ -607,17 +1087,18 @@ define <8 x float> @avx_vhsub_ps(<8 x fl
   %vecinit29 = insertelement <8 x float> %vecinit25, float %sub28, i32 7
   ret <8 x float> %vecinit29
 }
-; CHECK-LABEL: avx_vhsub_ps
-; SSE3: hsubps
-; SSE3-NEXT: hsubps
-; SSSE3: hsubps
-; SSSE3-NEXT: hsubps
-; AVX: vhsubps
-; AVX2: vhsubps
-; CHECK: ret
-
 
 define <4 x double> @avx_hadd_pd(<4 x double> %a, <4 x double> %b) {
+; SSE-LABEL: avx_hadd_pd:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddpd %xmm2, %xmm0
+; SSE-NEXT:    haddpd %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: avx_hadd_pd:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x double> %a, i32 0
   %vecext1 = extractelement <4 x double> %a, i32 1
   %add = fadd double %vecext, %vecext1
@@ -636,17 +1117,18 @@ define <4 x double> @avx_hadd_pd(<4 x do
   %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
   ret <4 x double> %vecinit13
 }
-; CHECK-LABEL: avx_hadd_pd
-; SSE3: haddpd
-; SSE3-NEXT: haddpd
-; SSSE3: haddpd
-; SSSE3-NEXT: haddpd
-; AVX: vhaddpd
-; AVX2: vhaddpd
-; CHECK: ret
-
 
 define <4 x double> @avx_hsub_pd(<4 x double> %a, <4 x double> %b) {
+; SSE-LABEL: avx_hsub_pd:
+; SSE:       # BB#0:
+; SSE-NEXT:    hsubpd %xmm2, %xmm0
+; SSE-NEXT:    hsubpd %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: avx_hsub_pd:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x double> %a, i32 0
   %vecext1 = extractelement <4 x double> %a, i32 1
   %sub = fsub double %vecext, %vecext1
@@ -665,19 +1147,83 @@ define <4 x double> @avx_hsub_pd(<4 x do
   %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
   ret <4 x double> %vecinit13
 }
-; CHECK-LABEL: avx_hsub_pd
-; SSE3: hsubpd
-; SSE3-NEXT: hsubpd
-; SSSE3: hsubpd
-; SSSE3-NEXT: hsubpd
-; AVX: vhsubpd
-; AVX2: vhsubpd
-; CHECK: ret
-
 
 ; Test AVX2 horizontal add of packed integer values from 256-bit vectors.
 
 define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
+; SSE3-LABEL: avx2_hadd_d:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
+; SSE3-NEXT:    movd %xmm4, %r8d
+; SSE3-NEXT:    addl %ecx, %r8d
+; SSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE3-NEXT:    movd %xmm4, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %r9d
+; SSE3-NEXT:    addl %edx, %r9d
+; SSE3-NEXT:    movd %xmm2, %esi
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %r10d
+; SSE3-NEXT:    addl %esi, %r10d
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edi
+; SSE3-NEXT:    addl %esi, %edi
+; SSE3-NEXT:    movd %xmm1, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %r11d
+; SSE3-NEXT:    addl %eax, %r11d
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movd %xmm3, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %edx
+; SSE3-NEXT:    addl %eax, %edx
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[3,1,2,3]
+; SSE3-NEXT:    movd %xmm0, %esi
+; SSE3-NEXT:    addl %eax, %esi
+; SSE3-NEXT:    movd %edi, %xmm0
+; SSE3-NEXT:    movd %r9d, %xmm1
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE3-NEXT:    movd %r10d, %xmm2
+; SSE3-NEXT:    movd %r8d, %xmm0
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT:    movd %esi, %xmm1
+; SSE3-NEXT:    movd %ecx, %xmm2
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE3-NEXT:    movd %edx, %xmm3
+; SSE3-NEXT:    movd %r11d, %xmm1
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: avx2_hadd_d:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm2, %xmm0
+; SSSE3-NEXT:    phaddd %xmm3, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: avx2_hadd_d:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vphaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: avx2_hadd_d:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
   %vecext1 = extractelement <8 x i32> %a, i32 1
   %add = add i32 %vecext, %vecext1
@@ -712,18 +1258,149 @@ define <8 x i32> @avx2_hadd_d(<8 x i32>
   %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
   ret <8 x i32> %vecinit29
 }
-; CHECK-LABEL: avx2_hadd_d
-; SSE3-NOT: phaddd
-; SSSE3: phaddd
-; SSSE3-NEXT: phaddd
-; AVX: vphaddd
-; AVX: vphaddd
-; AVX2: vphaddd
-; AVX2-NOT: vphaddd
-; CHECK: ret
-
 
 define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
+; SSE3-LABEL: avx2_hadd_w:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    pushq %rbp
+; SSE3-NEXT:  .Ltmp12:
+; SSE3-NEXT:    .cfi_def_cfa_offset 16
+; SSE3-NEXT:    pushq %r15
+; SSE3-NEXT:  .Ltmp13:
+; SSE3-NEXT:    .cfi_def_cfa_offset 24
+; SSE3-NEXT:    pushq %r14
+; SSE3-NEXT:  .Ltmp14:
+; SSE3-NEXT:    .cfi_def_cfa_offset 32
+; SSE3-NEXT:    pushq %r13
+; SSE3-NEXT:  .Ltmp15:
+; SSE3-NEXT:    .cfi_def_cfa_offset 40
+; SSE3-NEXT:    pushq %r12
+; SSE3-NEXT:  .Ltmp16:
+; SSE3-NEXT:    .cfi_def_cfa_offset 48
+; SSE3-NEXT:    pushq %rbx
+; SSE3-NEXT:  .Ltmp17:
+; SSE3-NEXT:    .cfi_def_cfa_offset 56
+; SSE3-NEXT:  .Ltmp18:
+; SSE3-NEXT:    .cfi_offset %rbx, -56
+; SSE3-NEXT:  .Ltmp19:
+; SSE3-NEXT:    .cfi_offset %r12, -48
+; SSE3-NEXT:  .Ltmp20:
+; SSE3-NEXT:    .cfi_offset %r13, -40
+; SSE3-NEXT:  .Ltmp21:
+; SSE3-NEXT:    .cfi_offset %r14, -32
+; SSE3-NEXT:  .Ltmp22:
+; SSE3-NEXT:    .cfi_offset %r15, -24
+; SSE3-NEXT:  .Ltmp23:
+; SSE3-NEXT:    .cfi_offset %rbp, -16
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    pextrw $1, %xmm0, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; SSE3-NEXT:    pextrw $2, %xmm0, %eax
+; SSE3-NEXT:    pextrw $3, %xmm0, %r15d
+; SSE3-NEXT:    addl %eax, %r15d
+; SSE3-NEXT:    pextrw $4, %xmm0, %eax
+; SSE3-NEXT:    pextrw $5, %xmm0, %r14d
+; SSE3-NEXT:    addl %eax, %r14d
+; SSE3-NEXT:    pextrw $6, %xmm0, %eax
+; SSE3-NEXT:    pextrw $7, %xmm0, %r13d
+; SSE3-NEXT:    addl %eax, %r13d
+; SSE3-NEXT:    movd %xmm1, %eax
+; SSE3-NEXT:    pextrw $1, %xmm1, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp) # 4-byte Spill
+; SSE3-NEXT:    pextrw $2, %xmm1, %eax
+; SSE3-NEXT:    pextrw $3, %xmm1, %r11d
+; SSE3-NEXT:    addl %eax, %r11d
+; SSE3-NEXT:    pextrw $4, %xmm1, %eax
+; SSE3-NEXT:    pextrw $5, %xmm1, %r10d
+; SSE3-NEXT:    addl %eax, %r10d
+; SSE3-NEXT:    pextrw $6, %xmm1, %eax
+; SSE3-NEXT:    pextrw $7, %xmm1, %r12d
+; SSE3-NEXT:    addl %eax, %r12d
+; SSE3-NEXT:    movd %xmm2, %eax
+; SSE3-NEXT:    pextrw $1, %xmm2, %ebx
+; SSE3-NEXT:    addl %eax, %ebx
+; SSE3-NEXT:    pextrw $2, %xmm2, %eax
+; SSE3-NEXT:    pextrw $3, %xmm2, %ecx
+; SSE3-NEXT:    addl %eax, %ecx
+; SSE3-NEXT:    pextrw $4, %xmm2, %esi
+; SSE3-NEXT:    pextrw $5, %xmm2, %r8d
+; SSE3-NEXT:    addl %esi, %r8d
+; SSE3-NEXT:    pextrw $6, %xmm2, %esi
+; SSE3-NEXT:    pextrw $7, %xmm2, %edx
+; SSE3-NEXT:    addl %esi, %edx
+; SSE3-NEXT:    movd %xmm3, %edi
+; SSE3-NEXT:    pextrw $1, %xmm3, %r9d
+; SSE3-NEXT:    addl %edi, %r9d
+; SSE3-NEXT:    pextrw $2, %xmm3, %ebp
+; SSE3-NEXT:    pextrw $3, %xmm3, %edi
+; SSE3-NEXT:    addl %ebp, %edi
+; SSE3-NEXT:    pextrw $4, %xmm3, %eax
+; SSE3-NEXT:    pextrw $5, %xmm3, %ebp
+; SSE3-NEXT:    addl %eax, %ebp
+; SSE3-NEXT:    pextrw $6, %xmm3, %esi
+; SSE3-NEXT:    pextrw $7, %xmm3, %eax
+; SSE3-NEXT:    addl %esi, %eax
+; SSE3-NEXT:    movd %edx, %xmm8
+; SSE3-NEXT:    movd %r13d, %xmm3
+; SSE3-NEXT:    movd %ecx, %xmm9
+; SSE3-NEXT:    movd %r15d, %xmm4
+; SSE3-NEXT:    movd %r8d, %xmm10
+; SSE3-NEXT:    movd %r14d, %xmm7
+; SSE3-NEXT:    movd %ebx, %xmm11
+; SSE3-NEXT:    movd -{{[0-9]+}}(%rsp), %xmm0 # 4-byte Folded Reload
+; SSE3-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; SSE3-NEXT:    movd %eax, %xmm12
+; SSE3-NEXT:    movd %r12d, %xmm6
+; SSE3-NEXT:    movd %edi, %xmm13
+; SSE3-NEXT:    movd %r11d, %xmm5
+; SSE3-NEXT:    movd %ebp, %xmm14
+; SSE3-NEXT:    movd %r10d, %xmm2
+; SSE3-NEXT:    movd %r9d, %xmm15
+; SSE3-NEXT:    movd -{{[0-9]+}}(%rsp), %xmm1 # 4-byte Folded Reload
+; SSE3-NEXT:    # xmm1 = mem[0],zero,zero,zero
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE3-NEXT:    popq %rbx
+; SSE3-NEXT:    popq %r12
+; SSE3-NEXT:    popq %r13
+; SSE3-NEXT:    popq %r14
+; SSE3-NEXT:    popq %r15
+; SSE3-NEXT:    popq %rbp
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: avx2_hadd_w:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddw %xmm2, %xmm0
+; SSSE3-NEXT:    phaddw %xmm3, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: avx2_hadd_w:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vphaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vphaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: avx2_hadd_w:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vphaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <16 x i16> %a, i32 0
   %vecext1 = extractelement <16 x i16> %a, i32 1
   %add = add i16 %vecext, %vecext1
@@ -790,13 +1467,3 @@ define <16 x i16> @avx2_hadd_w(<16 x i16
   %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
   ret <16 x i16> %vecinit108
 }
-; CHECK-LABEL: avx2_hadd_w
-; SSE3-NOT: phaddw
-; SSSE3: phaddw
-; SSSE3-NEXT: phaddw
-; AVX: vphaddw
-; AVX: vphaddw
-; AVX2: vphaddw
-; AVX2-NOT: vphaddw
-; CHECK: ret
-
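
For reference, any of the regenerated RUN lines can also be exercised by hand;
a minimal sketch for the AVX2 configuration of haddsub-2.ll (assuming llc and
FileCheck from the same build are on PATH, run from the llvm/trunk directory):

    llc < test/CodeGen/X86/haddsub-2.ll -mtriple=x86_64-unknown -mattr=+avx2 \
      | FileCheck test/CodeGen/X86/haddsub-2.ll --check-prefix=AVX --check-prefix=AVX2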

Modified: llvm/trunk/test/CodeGen/X86/haddsub-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub-undef.ll?rev=257992&r1=257991&r2=257992&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub-undef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub-undef.ll Sat Jan 16 08:03:40 2016
@@ -1,10 +1,20 @@
-; RUN: llc < %s -march=x86-64 -mattr=ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
-; RUN: llc < %s -march=x86-64 -mattr=avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
-; RUN: llc < %s -march=x86-64 -mattr=avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 
 ; Verify that we correctly fold horizontal binop even in the presence of UNDEFs.
 
 define <4 x float> @test1_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test1_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test1_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -19,14 +29,17 @@ define <4 x float> @test1_undef(<4 x flo
   %vecinit13 = insertelement <4 x float> %vecinit5, float %add12, i32 3
   ret <4 x float> %vecinit13
 }
-; CHECK-LABEL: test1_undef
-; SSE: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @test2_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test2_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test2_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -41,14 +54,17 @@ define <4 x float> @test2_undef(<4 x flo
   %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
   ret <4 x float> %vecinit13
 }
-; CHECK-LABEL: test2_undef
-; SSE: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test3_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test3_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -63,38 +79,57 @@ define <4 x float> @test3_undef(<4 x flo
   %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
   ret <4 x float> %vecinit9
 }
-; CHECK-LABEL: test3_undef
-; SSE: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test4_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-NEXT:    addss %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test4_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
   %vecinit = insertelement <4 x float> undef, float %add, i32 0
   ret <4 x float> %vecinit
 }
-; CHECK-LABEL: test4_undef
-; CHECK-NOT: haddps
-; CHECK: ret
-
 
 define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
+; SSE-LABEL: test5_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    movapd %xmm0, %xmm1
+; SSE-NEXT:    shufpd {{.*#+}} xmm1 = xmm1[1,0]
+; SSE-NEXT:    addsd %xmm0, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test5_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <2 x double> %a, i32 0
   %vecext1 = extractelement <2 x double> %a, i32 1
   %add = fadd double %vecext, %vecext1
   %vecinit = insertelement <2 x double> undef, double %add, i32 0
   ret <2 x double> %vecinit
 }
-; CHECK-LABEL: test5_undef
-; CHECK-NOT: haddpd
-; CHECK: ret
-
 
 define <4 x float> @test6_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test6_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test6_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -105,14 +140,17 @@ define <4 x float> @test6_undef(<4 x flo
   %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
   ret <4 x float> %vecinit5
 }
-; CHECK-LABEL: test6_undef
-; SSE: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test7_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test7_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %b, i32 0
   %vecext1 = extractelement <4 x float> %b, i32 1
   %add = fadd float %vecext, %vecext1
@@ -123,14 +161,30 @@ define <4 x float> @test7_undef(<4 x flo
   %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
   ret <4 x float> %vecinit5
 }
-; CHECK-LABEL: test7_undef
-; SSE: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NEXT: ret
-
 
 define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test8_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-NEXT:    addss %xmm0, %xmm1
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    shufpd {{.*#+}} xmm2 = xmm2[1,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-NEXT:    addss %xmm2, %xmm0
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1,1,3]
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test8_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT:    vaddss %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -141,12 +195,17 @@ define <4 x float> @test8_undef(<4 x flo
   %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 2
   ret <4 x float> %vecinit5
 }
-; CHECK-LABEL: test8_undef
-; CHECK-NOT: haddps
-; CHECK: ret
-
 
 define <4 x float> @test9_undef(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: test9_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test9_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -157,11 +216,17 @@ define <4 x float> @test9_undef(<4 x flo
   %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
   ret <4 x float> %vecinit5
 }
-; CHECK-LABEL: test9_undef
-; CHECK: haddps
-; CHECK-NEXT: ret
 
 define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: test10_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test10_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
   %vecext1 = extractelement <8 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -172,14 +237,21 @@ define <8 x float> @test10_undef(<8 x fl
   %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 3
   ret <8 x float> %vecinit5
 }
-; CHECK-LABEL: test10_undef
-; SSE: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NOT: haddps
-; CHECK: ret
 
 define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: test11_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-NEXT:    addss %xmm1, %xmm0
+; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE-NEXT:    addss %xmm3, %xmm1
+; SSE-NEXT:    movddup {{.*#+}} xmm1 = xmm1[0,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test11_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
   %vecext1 = extractelement <8 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -190,13 +262,17 @@ define <8 x float> @test11_undef(<8 x fl
   %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 6
   ret <8 x float> %vecinit5
 }
-; CHECK-LABEL: test11_undef
-; SSE-NOT: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK: ret
 
 define <8 x float> @test12_undef(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: test12_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test12_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
   %vecext1 = extractelement <8 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -207,14 +283,18 @@ define <8 x float> @test12_undef(<8 x fl
   %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 1
   ret <8 x float> %vecinit5
 }
-; CHECK-LABEL: test12_undef
-; SSE: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NOT: haddps
-; CHECK: ret
 
 define <8 x float> @test13_undef(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: test13_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    haddps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test13_undef:
+; AVX:       # BB#0:
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %vecext = extractelement <8 x float> %a, i32 0
   %vecext1 = extractelement <8 x float> %a, i32 1
   %add1 = fadd float %vecext, %vecext1
@@ -233,15 +313,22 @@ define <8 x float> @test13_undef(<8 x fl
   %vecinit4 = insertelement <8 x float> %vecinit3, float %add4, i32 3
   ret <8 x float> %vecinit4
 }
-; CHECK-LABEL: test13_undef
-; SSE: haddps
-; SSE-NOT: haddps
-; AVX: vhaddps
-; AVX2: vhaddps
-; CHECK-NOT: haddps
-; CHECK: ret
 
 define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: test14_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    phaddd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test14_undef:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test14_undef:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
   %vecext1 = extractelement <8 x i32> %a, i32 1
   %add = add i32 %vecext, %vecext1
@@ -252,17 +339,45 @@ define <8 x i32> @test14_undef(<8 x i32>
   %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 3
   ret <8 x i32> %vecinit5
 }
-; CHECK-LABEL: test14_undef
-; SSE: phaddd
-; AVX: vphaddd
-; AVX2: vphaddd
-; CHECK-NOT: phaddd
-; CHECK: ret
 
 ; On AVX2, the following sequence can be folded into a single horizontal add.
-; If the Subtarget doesn't support AVX2, then we avoid emitting two packed 
-; integer horizontal adds instead of two scalar adds followed by vector inserts.
+; If the Subtarget doesn't support AVX2, we avoid emitting two packed integer
+; horizontal adds and instead emit two scalar adds followed by vector inserts.
 define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: test15_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-NEXT:    movd %xmm0, %ecx
+; SSE-NEXT:    addl %eax, %ecx
+; SSE-NEXT:    movd %xmm3, %eax
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE-NEXT:    movd %xmm0, %edx
+; SSE-NEXT:    addl %eax, %edx
+; SSE-NEXT:    movd %ecx, %xmm0
+; SSE-NEXT:    movd %edx, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test15_undef:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    vpextrd $1, %xmm0, %edx
+; AVX1-NEXT:    addl %eax, %edx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vmovd %edx, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test15_undef:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
   %vecext1 = extractelement <8 x i32> %a, i32 1
   %add = add i32 %vecext, %vecext1
@@ -273,13 +388,22 @@ define <8 x i32> @test15_undef(<8 x i32>
   %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 6
   ret <8 x i32> %vecinit5
 }
-; CHECK-LABEL: test15_undef
-; SSE-NOT: phaddd
-; AVX-NOT: vphaddd
-; AVX2: vphaddd
-; CHECK: ret
 
 define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: test16_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    phaddd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test16_undef:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test16_undef:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
   %vecext1 = extractelement <8 x i32> %a, i32 1
   %add = add i32 %vecext, %vecext1
@@ -290,14 +414,24 @@ define <8 x i32> @test16_undef(<8 x i32>
   %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
   ret <8 x i32> %vecinit5
 }
-; CHECK-LABEL: test16_undef
-; SSE: phaddd
-; AVX: vphaddd
-; AVX2: vphaddd
-; CHECK-NOT: haddps
-; CHECK: ret
 
 define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: test17_undef:
+; SSE:       # BB#0:
+; SSE-NEXT:    phaddd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: test17_undef:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test17_undef:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %a, i32 0
   %vecext1 = extractelement <8 x i32> %a, i32 1
   %add1 = add i32 %vecext, %vecext1
@@ -316,10 +450,3 @@ define <8 x i32> @test17_undef(<8 x i32>
   %vecinit4 = insertelement <8 x i32> %vecinit3, i32 %add4, i32 3
   ret <8 x i32> %vecinit4
 }
-; CHECK-LABEL: test17_undef
-; SSE: phaddd
-; AVX: vphaddd
-; AVX2: vphaddd
-; CHECK-NOT: haddps
-; CHECK: ret
-
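
A note for matching the regenerated checks above to the IR: the packed
horizontal adds operate per 128-bit lane. As a sketch, in AT&T syntax (sources
first, destination last) and with illustrative register names, the 256-bit
integer form expands to:

  vphaddd %ymmB, %ymmA, %ymmD
  # ymmD = { a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7 }

One instruction writes both 128-bit halves of the destination, which is why
only the AVX2 run can fold a test whose sums land in different halves (lanes
0 and 6 in test15_undef) into a single vphaddd, while the SSE and AVX1 runs
for that test fall back to scalar adds plus inserts. Conversely, when every
sum lands in the low half (test13_undef, test17_undef), the AVX runs extract
the high 128 bits and use a single 128-bit vhaddps/vphaddd rather than a
256-bit op.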

Modified: llvm/trunk/test/CodeGen/X86/haddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub.ll?rev=257992&r1=257991&r2=257992&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub.ll Sat Jan 16 08:03:40 2016
@@ -1,293 +1,392 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse3,-avx | FileCheck %s -check-prefix=SSE3
-; RUN: llc < %s -march=x86-64 -mattr=-sse3,+avx | FileCheck %s -check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3 | FileCheck %s --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
 
+define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: haddpd1:
-; SSE3-NOT: vhaddpd
-; SSE3: haddpd
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddpd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddpd1:
-; AVX: vhaddpd
-define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
   %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3>
   %r = fadd <2 x double> %a, %b
   ret <2 x double> %r
 }
 
+define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: haddpd2:
-; SSE3-NOT: vhaddpd
-; SSE3: haddpd
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddpd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddpd2:
-; AVX: vhaddpd
-define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 2>
   %b = shufflevector <2 x double> %y, <2 x double> %x, <2 x i32> <i32 2, i32 1>
   %r = fadd <2 x double> %a, %b
   ret <2 x double> %r
 }
 
+define <2 x double> @haddpd3(<2 x double> %x) {
 ; SSE3-LABEL: haddpd3:
-; SSE3-NOT: vhaddpd
-; SSE3: haddpd
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddpd3:
-; AVX: vhaddpd
-define <2 x double> @haddpd3(<2 x double> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
   %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
   %r = fadd <2 x double> %a, %b
   ret <2 x double> %r
 }
 
+define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) {
 ; SSE3-LABEL: haddps1:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddps1:
-; AVX: vhaddps
-define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %r = fadd <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) {
 ; SSE3-LABEL: haddps2:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddps2:
-; AVX: vhaddps
-define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 2, i32 5, i32 6>
   %b = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 4, i32 7, i32 0, i32 3>
   %r = fadd <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @haddps3(<4 x float> %x) {
 ; SSE3-LABEL: haddps3:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddps3:
-; AVX: vhaddps
-define <4 x float> @haddps3(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
   %r = fadd <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @haddps4(<4 x float> %x) {
 ; SSE3-LABEL: haddps4:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddps4:
-; AVX: vhaddps
-define <4 x float> @haddps4(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
   %r = fadd <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @haddps5(<4 x float> %x) {
 ; SSE3-LABEL: haddps5:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddps5:
-; AVX: vhaddps
-define <4 x float> @haddps5(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 undef, i32 undef>
   %r = fadd <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @haddps6(<4 x float> %x) {
 ; SSE3-LABEL: haddps6:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddps6:
-; AVX: vhaddps
-define <4 x float> @haddps6(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
   %r = fadd <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @haddps7(<4 x float> %x) {
 ; SSE3-LABEL: haddps7:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: haddps7:
-; AVX: vhaddps
-define <4 x float> @haddps7(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 undef>
   %r = fadd <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: hsubpd1:
-; SSE3-NOT: vhsubpd
-; SSE3: hsubpd
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubpd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: hsubpd1:
-; AVX: vhsubpd
-define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
   %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3>
   %r = fsub <2 x double> %a, %b
   ret <2 x double> %r
 }
 
+define <2 x double> @hsubpd2(<2 x double> %x) {
 ; SSE3-LABEL: hsubpd2:
-; SSE3-NOT: vhsubpd
-; SSE3: hsubpd
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubpd %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: hsubpd2:
-; AVX: vhsubpd
-define <2 x double> @hsubpd2(<2 x double> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
   %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
   %r = fsub <2 x double> %a, %b
   ret <2 x double> %r
 }
 
+define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) {
 ; SSE3-LABEL: hsubps1:
-; SSE3-NOT: vhsubps
-; SSE3: hsubps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubps %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: hsubps1:
-; AVX: vhsubps
-define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %r = fsub <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @hsubps2(<4 x float> %x) {
 ; SSE3-LABEL: hsubps2:
-; SSE3-NOT: vhsubps
-; SSE3: hsubps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: hsubps2:
-; AVX: vhsubps
-define <4 x float> @hsubps2(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
   %r = fsub <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @hsubps3(<4 x float> %x) {
 ; SSE3-LABEL: hsubps3:
-; SSE3-NOT: vhsubps
-; SSE3: hsubps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: hsubps3:
-; AVX: vhsubps
-define <4 x float> @hsubps3(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
   %r = fsub <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <4 x float> @hsubps4(<4 x float> %x) {
 ; SSE3-LABEL: hsubps4:
-; SSE3-NOT: vhsubps
-; SSE3: hsubps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: hsubps4:
-; AVX: vhsubps
-define <4 x float> @hsubps4(<4 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
   %r = fsub <4 x float> %a, %b
   ret <4 x float> %r
 }
 
+define <8 x float> @vhaddps1(<8 x float> %x, <8 x float> %y) {
 ; SSE3-LABEL: vhaddps1:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm2, %xmm0
+; SSE3-NEXT:    haddps %xmm3, %xmm1
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: vhaddps1:
-; AVX: vhaddps
-define <8 x float> @vhaddps1(<8 x float> %x, <8 x float> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
   %b = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
   %r = fadd <8 x float> %a, %b
   ret <8 x float> %r
 }
 
+define <8 x float> @vhaddps2(<8 x float> %x, <8 x float> %y) {
 ; SSE3-LABEL: vhaddps2:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm2, %xmm0
+; SSE3-NEXT:    haddps %xmm3, %xmm1
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: vhaddps2:
-; AVX: vhaddps
-define <8 x float> @vhaddps2(<8 x float> %x, <8 x float> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 5, i32 6, i32 13, i32 14>
   %b = shufflevector <8 x float> %y, <8 x float> %x, <8 x i32> <i32 8, i32 11, i32 0, i32 3, i32 12, i32 15, i32 4, i32 7>
   %r = fadd <8 x float> %a, %b
   ret <8 x float> %r
 }
 
+define <8 x float> @vhaddps3(<8 x float> %x) {
 ; SSE3-LABEL: vhaddps3:
-; SSE3-NOT: vhaddps
-; SSE3: haddps
-; SSE3: haddps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm0, %xmm0
+; SSE3-NEXT:    haddps %xmm1, %xmm1
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: vhaddps3:
-; AVX: vhaddps
-define <8 x float> @vhaddps3(<8 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
   %b = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 9, i32 undef, i32 5, i32 7, i32 13, i32 15>
   %r = fadd <8 x float> %a, %b
   ret <8 x float> %r
 }
 
+define <8 x float> @vhsubps1(<8 x float> %x, <8 x float> %y) {
 ; SSE3-LABEL: vhsubps1:
-; SSE3-NOT: vhsubps
-; SSE3: hsubps
-; SSE3: hsubps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubps %xmm2, %xmm0
+; SSE3-NEXT:    hsubps %xmm3, %xmm1
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: vhsubps1:
-; AVX: vhsubps
-define <8 x float> @vhsubps1(<8 x float> %x, <8 x float> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
   %b = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
   %r = fsub <8 x float> %a, %b
   ret <8 x float> %r
 }
 
+define <8 x float> @vhsubps3(<8 x float> %x) {
 ; SSE3-LABEL: vhsubps3:
-; SSE3-NOT: vhsubps
-; SSE3: hsubps
-; SSE3: hsubps
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-NEXT:    hsubps %xmm1, %xmm1
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: vhsubps3:
-; AVX: vhsubps
-define <8 x float> @vhsubps3(<8 x float> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubps %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
   %b = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 9, i32 undef, i32 5, i32 7, i32 13, i32 15>
   %r = fsub <8 x float> %a, %b
   ret <8 x float> %r
 }
 
+define <4 x double> @vhaddpd1(<4 x double> %x, <4 x double> %y) {
 ; SSE3-LABEL: vhaddpd1:
-; SSE3-NOT: vhaddpd
-; SSE3: haddpd
-; SSE3: haddpd
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddpd %xmm2, %xmm0
+; SSE3-NEXT:    haddpd %xmm3, %xmm1
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: vhaddpd1:
-; AVX: vhaddpd
-define <4 x double> @vhaddpd1(<4 x double> %x, <4 x double> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
   %b = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
   %r = fadd <4 x double> %a, %b
   ret <4 x double> %r
 }
 
+define <4 x double> @vhsubpd1(<4 x double> %x, <4 x double> %y) {
 ; SSE3-LABEL: vhsubpd1:
-; SSE3-NOT: vhsubpd
-; SSE3: hsubpd
-; SSE3: hsubpd
+; SSE3:       # BB#0:
+; SSE3-NEXT:    hsubpd %xmm2, %xmm0
+; SSE3-NEXT:    hsubpd %xmm3, %xmm1
+; SSE3-NEXT:    retq
+;
 ; AVX-LABEL: vhsubpd1:
-; AVX: vhsubpd
-define <4 x double> @vhsubpd1(<4 x double> %x, <4 x double> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
   %b = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
   %r = fsub <4 x double> %a, %b
   ret <4 x double> %r
 }
 
-; CHECK-LABEL: haddps_v2f32
-; CHECK: haddps %xmm{{[0-9]+}}, %xmm0
-; CHECK-NEXT: retq
 define <2 x float> @haddps_v2f32(<4 x float> %v0) {
+; SSE3-LABEL: haddps_v2f32:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    haddps %xmm0, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: haddps_v2f32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %v0.0 = extractelement <4 x float> %v0, i32 0
   %v0.1 = extractelement <4 x float> %v0, i32 1
   %v0.2 = extractelement <4 x float> %v0, i32 2
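
The shufflevector-based tests above map directly onto the hardware semantics.
Again as a sketch with illustrative register names, the 128-bit float forms
compute:

  haddps %xmmY, %xmmX
  # xmmX = { x0+x1, x2+x3, y0+y1, y2+y3 }
  hsubps %xmmY, %xmmX
  # xmmX = { x0-x1, x2-x3, y0-y1, y2-y3 }

so an even-element shuffle (<0, 2, 4, 6>) paired with an odd-element shuffle
(<1, 3, 5, 7>) feeding one fadd or fsub is exactly one haddps or hsubps, and
the undef shuffle lanes in the later tests show the combine still firing when
only some result lanes are live. The next file repeats the exercise for the
SSSE3 integer forms (phaddw, phaddd, phsubw, phsubd), whose semantics are the
same with 16- and 32-bit integer lanes.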

Modified: llvm/trunk/test/CodeGen/X86/phaddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/phaddsub.ll?rev=257992&r1=257991&r2=257992&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/phaddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/phaddsub.ll Sat Jan 16 08:03:40 2016
@@ -1,168 +1,225 @@
-; RUN: llc < %s -march=x86-64 -mattr=+ssse3,-avx | FileCheck %s -check-prefix=SSSE3
-; RUN: llc < %s -march=x86-64 -mattr=-ssse3,+avx | FileCheck %s -check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
 
+define <8 x i16> @phaddw1(<8 x i16> %x, <8 x i16> %y) {
 ; SSSE3-LABEL: phaddw1:
-; SSSE3-NOT: vphaddw
-; SSSE3: phaddw
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddw %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddw1:
-; AVX: vphaddw
-define <8 x i16> @phaddw1(<8 x i16> %x, <8 x i16> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %b = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %r = add <8 x i16> %a, %b
   ret <8 x i16> %r
 }
 
+define <8 x i16> @phaddw2(<8 x i16> %x, <8 x i16> %y) {
 ; SSSE3-LABEL: phaddw2:
-; SSSE3-NOT: vphaddw
-; SSSE3: phaddw
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddw %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddw2:
-; AVX: vphaddw
-define <8 x i16> @phaddw2(<8 x i16> %x, <8 x i16> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 1, i32 2, i32 5, i32 6, i32 9, i32 10, i32 13, i32 14>
   %b = shufflevector <8 x i16> %y, <8 x i16> %x, <8 x i32> <i32 8, i32 11, i32 12, i32 15, i32 0, i32 3, i32 4, i32 7>
   %r = add <8 x i16> %a, %b
   ret <8 x i16> %r
 }
 
+define <4 x i32> @phaddd1(<4 x i32> %x, <4 x i32> %y) {
 ; SSSE3-LABEL: phaddd1:
-; SSSE3-NOT: vphaddd
-; SSSE3: phaddd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddd1:
-; AVX: vphaddd
-define <4 x i32> @phaddd1(<4 x i32> %x, <4 x i32> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %r = add <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phaddd2(<4 x i32> %x, <4 x i32> %y) {
 ; SSSE3-LABEL: phaddd2:
-; SSSE3-NOT: vphaddd
-; SSSE3: phaddd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddd2:
-; AVX: vphaddd
-define <4 x i32> @phaddd2(<4 x i32> %x, <4 x i32> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 1, i32 2, i32 5, i32 6>
   %b = shufflevector <4 x i32> %y, <4 x i32> %x, <4 x i32> <i32 4, i32 7, i32 0, i32 3>
   %r = add <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phaddd3(<4 x i32> %x) {
 ; SSSE3-LABEL: phaddd3:
-; SSSE3-NOT: vphaddd
-; SSSE3: phaddd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddd3:
-; AVX: vphaddd
-define <4 x i32> @phaddd3(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
   %r = add <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phaddd4(<4 x i32> %x) {
 ; SSSE3-LABEL: phaddd4:
-; SSSE3-NOT: vphaddd
-; SSSE3: phaddd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddd4:
-; AVX: vphaddd
-define <4 x i32> @phaddd4(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
   %r = add <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phaddd5(<4 x i32> %x) {
 ; SSSE3-LABEL: phaddd5:
-; SSSE3-NOT: vphaddd
-; SSSE3: phaddd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddd5:
-; AVX: vphaddd
-define <4 x i32> @phaddd5(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 undef, i32 undef>
   %r = add <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phaddd6(<4 x i32> %x) {
 ; SSSE3-LABEL: phaddd6:
-; SSSE3-NOT: vphaddd
-; SSSE3: phaddd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddd6:
-; AVX: vphaddd
-define <4 x i32> @phaddd6(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
   %r = add <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phaddd7(<4 x i32> %x) {
 ; SSSE3-LABEL: phaddd7:
-; SSSE3-NOT: vphaddd
-; SSSE3: phaddd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phaddd7:
-; AVX: vphaddd
-define <4 x i32> @phaddd7(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 undef>
   %r = add <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <8 x i16> @phsubw1(<8 x i16> %x, <8 x i16> %y) {
 ; SSSE3-LABEL: phsubw1:
-; SSSE3-NOT: vphsubw
-; SSSE3: phsubw
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phsubw %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phsubw1:
-; AVX: vphsubw
-define <8 x i16> @phsubw1(<8 x i16> %x, <8 x i16> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphsubw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %b = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %r = sub <8 x i16> %a, %b
   ret <8 x i16> %r
 }
 
+define <4 x i32> @phsubd1(<4 x i32> %x, <4 x i32> %y) {
 ; SSSE3-LABEL: phsubd1:
-; SSSE3-NOT: vphsubd
-; SSSE3: phsubd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phsubd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phsubd1:
-; AVX: vphsubd
-define <4 x i32> @phsubd1(<4 x i32> %x, <4 x i32> %y) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %r = sub <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phsubd2(<4 x i32> %x) {
 ; SSSE3-LABEL: phsubd2:
-; SSSE3-NOT: vphsubd
-; SSSE3: phsubd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phsubd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phsubd2:
-; AVX: vphsubd
-define <4 x i32> @phsubd2(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
   %r = sub <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phsubd3(<4 x i32> %x) {
 ; SSSE3-LABEL: phsubd3:
-; SSSE3-NOT: vphsubd
-; SSSE3: phsubd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phsubd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phsubd3:
-; AVX: vphsubd
-define <4 x i32> @phsubd3(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
   %r = sub <4 x i32> %a, %b
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phsubd4(<4 x i32> %x) {
 ; SSSE3-LABEL: phsubd4:
-; SSSE3-NOT: vphsubd
-; SSSE3: phsubd
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    phsubd %xmm0, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; AVX-LABEL: phsubd4:
-; AVX: vphsubd
-define <4 x i32> @phsubd4(<4 x i32> %x) {
+; AVX:       # BB#0:
+; AVX-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %a = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %b = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
   %r = sub <4 x i32> %a, %b
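
(For anyone regenerating these assertions after a future codegen change: the
NOTE line at the top of each file names the tool. A typical invocation, where
the --llc-binary flag and the build path are assumptions to be checked against
the script's --help, looks like:

  $ utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        test/CodeGen/X86/haddsub.ll test/CodeGen/X86/phaddsub.ll

The script runs each RUN line itself and rewrites the per-prefix assertion
blocks in place, which is what produced the full-body checks in this patch.)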



