[llvm] r350221 - [x86] add more tests for potential horizontal ops; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 2 08:36:04 PST 2019


Author: spatel
Date: Wed Jan  2 08:36:04 2019
New Revision: 350221

URL: http://llvm.org/viewvc/llvm-project?rev=350221&view=rev
Log:
[x86] add more tests for potential horizontal ops; NFC

As discussed in D56011 - add RUN lines for AVX512 and tests where the extracts have extra uses.
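
For context, each added test has the same shape: extract two adjacent elements of a
vector, combine them with a scalar fadd or fsub, and check the lowering under the
slow/fast-hops RUN lines. A minimal standalone version (hypothetical file; the IR body
and the expected AVX sequence are copied from the diff below) would look like:

  ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s

  define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
  ; CHECK-LABEL: extract_extract_v4f32_fadd_f32:
  ; CHECK:       vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
  ; CHECK-NEXT:  vaddss %xmm1, %xmm0, %xmm0
  ; CHECK-NEXT:  retq
    %x0 = extractelement <4 x float> %x, i32 0    ; lane 0
    %x1 = extractelement <4 x float> %x, i32 1    ; lane 1
    %x01 = fadd float %x0, %x1                    ; scalar add of adjacent lanes
    ret float %x01
  }

With fast-hops enabled, the change proposed in D56011 could instead select a single
horizontal op (e.g. "vhaddps %xmm0, %xmm0, %xmm0", whose low element is exactly
x[0] + x[1]); these runs pin down the current shuffle-plus-scalar-op output so that
change will be visible when the checks are regenerated.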

Modified:
    llvm/trunk/test/CodeGen/X86/haddsub.ll

Modified: llvm/trunk/test/CodeGen/X86/haddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub.ll?rev=350221&r1=350220&r2=350221&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub.ll Wed Jan  2 08:36:04 2019
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3           | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx            | FileCheck %s --check-prefixes=AVX,AVX-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops  | FileCheck %s --check-prefixes=AVX,AVX-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3               | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops     | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx                | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops      | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f            | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops  | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512-FAST
 
 define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
 ; SSE3-LABEL: haddpd1:
@@ -583,6 +585,8 @@ define <2 x float> @haddps_v2f32(<4 x fl
   ret <2 x float> %res1
 }
 
+; 128-bit vectors, float/double, fadd/fsub
+
 define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
 ; SSE3-LABEL: extract_extract_v4f32_fadd_f32:
 ; SSE3:       # %bb.0:
@@ -619,6 +623,124 @@ define float @extract_extract_v4f32_fadd
   ret float %x01
 }
 
+define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
+; SSE3-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    addsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fadd double %x0, %x1
+  ret double %x01
+}
+
+define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
+; SSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    addsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fadd double %x1, %x0
+  ret double %x01
+}
+
+define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
+; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    subss %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fsub float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
+; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    subss %xmm0, %xmm1
+; SSE3-NEXT:    movaps %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fsub float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
+; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    subsd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fsub double %x0, %x1
+  ret double %x01
+}
+
+define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
+; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    subsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i32 0
+  %x1 = extractelement <2 x double> %x, i32 1
+  %x01 = fsub double %x1, %x0
+  ret double %x01
+}
+
+; 256-bit vectors, float/double, fadd/fsub
+
 define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
 ; SSE3-LABEL: extract_extract_v8f32_fadd_f32:
 ; SSE3:       # %bb.0:
@@ -657,41 +779,46 @@ define float @extract_extract_v8f32_fadd
   ret float %x01
 }
 
-define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
+define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
+; SSE3-LABEL: extract_extract_v4f64_fadd_f64:
 ; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    subss %xmm1, %xmm0
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    addsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-LABEL: extract_extract_v4f64_fadd_f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <4 x float> %x, i32 0
-  %x1 = extractelement <4 x float> %x, i32 1
-  %x01 = fsub float %x0, %x1
-  ret float %x01
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fadd double %x0, %x1
+  ret double %x01
 }
 
-define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
+define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
+; SSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
 ; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    subss %xmm0, %xmm1
-; SSE3-NEXT:    movaps %xmm1, %xmm0
+; SSE3-NEXT:    movapd %xmm0, %xmm1
+; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT:    addsd %xmm0, %xmm1
+; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; AVX-LABEL: extract_extract_v4f64_fadd_f64_commute:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <4 x float> %x, i32 0
-  %x1 = extractelement <4 x float> %x, i32 1
-  %x01 = fsub float %x1, %x0
-  ret float %x01
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fadd double %x1, %x0
+  ret double %x01
 }
 
 define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
@@ -733,48 +860,89 @@ define float @extract_extract_v8f32_fsub
   ret float %x01
 }
 
-define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64:
+define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
+; SSE3-LABEL: extract_extract_v4f64_fsub_f64:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movapd %xmm0, %xmm1
 ; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    subsd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-LABEL: extract_extract_v4f64_fsub_f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <2 x double> %x, i32 0
-  %x1 = extractelement <2 x double> %x, i32 1
-  %x01 = fadd double %x0, %x1
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fsub double %x0, %x1
   ret double %x01
 }
 
-define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
+define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
+; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movapd %xmm0, %xmm1
 ; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
+; SSE3-NEXT:    subsd %xmm0, %xmm1
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-LABEL: extract_extract_v4f64_fsub_f64_commute:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <2 x double> %x, i32 0
-  %x1 = extractelement <2 x double> %x, i32 1
-  %x01 = fadd double %x1, %x0
+  %x0 = extractelement <4 x double> %x, i32 0
+  %x1 = extractelement <4 x double> %x, i32 1
+  %x01 = fsub double %x1, %x0
   ret double %x01
 }
 
-define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64:
+; 512-bit vectors, float/double, fadd/fsub
+
+define float @extract_extract_v16f32_fadd_f32(<16 x float> %x) {
+; SSE3-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    addss %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v16f32_fadd_f32_commute(<16 x float> %x) {
+; SSE3-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    addss %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fadd float %x1, %x0
+  ret float %x01
+}
+
+define double @extract_extract_v8f64_fadd_f64(<8 x double> %x) {
+; SSE3-LABEL: extract_extract_v8f64_fadd_f64:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movapd %xmm0, %xmm1
 ; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
@@ -782,20 +950,20 @@ define double @extract_extract_v4f64_fad
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-LABEL: extract_extract_v8f64_fadd_f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <4 x double> %x, i32 0
-  %x1 = extractelement <4 x double> %x, i32 1
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
   %x01 = fadd double %x0, %x1
   ret double %x01
 }
 
-define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
+define double @extract_extract_v8f64_fadd_f64_commute(<8 x double> %x) {
+; SSE3-LABEL: extract_extract_v8f64_fadd_f64_commute:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movapd %xmm0, %xmm1
 ; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
@@ -803,79 +971,79 @@ define double @extract_extract_v4f64_fad
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-LABEL: extract_extract_v8f64_fadd_f64_commute:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <4 x double> %x, i32 0
-  %x1 = extractelement <4 x double> %x, i32 1
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
   %x01 = fadd double %x1, %x0
   ret double %x01
 }
 
-define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
+define float @extract_extract_v16f32_fsub_f32(<16 x float> %x) {
+; SSE3-LABEL: extract_extract_v16f32_fsub_f32:
 ; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    subsd %xmm1, %xmm0
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    subss %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-LABEL: extract_extract_v16f32_fsub_f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <2 x double> %x, i32 0
-  %x1 = extractelement <2 x double> %x, i32 1
-  %x01 = fsub double %x0, %x1
-  ret double %x01
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fsub float %x0, %x1
+  ret float %x01
 }
 
-define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
+define float @extract_extract_v16f32_fsub_f32_commute(<16 x float> %x) {
+; SSE3-LABEL: extract_extract_v16f32_fsub_f32_commute:
 ; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    subsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    subss %xmm0, %xmm1
+; SSE3-NEXT:    movaps %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; AVX-LABEL: extract_extract_v16f32_fsub_f32_commute:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <2 x double> %x, i32 0
-  %x1 = extractelement <2 x double> %x, i32 1
-  %x01 = fsub double %x1, %x0
-  ret double %x01
+  %x0 = extractelement <16 x float> %x, i32 0
+  %x1 = extractelement <16 x float> %x, i32 1
+  %x01 = fsub float %x1, %x0
+  ret float %x01
 }
 
-define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fsub_f64:
+define double @extract_extract_v8f64_fsub_f64(<8 x double> %x) {
+; SSE3-LABEL: extract_extract_v8f64_fsub_f64:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movapd %xmm0, %xmm1
 ; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; SSE3-NEXT:    subsd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-LABEL: extract_extract_v8f64_fsub_f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <4 x double> %x, i32 0
-  %x1 = extractelement <4 x double> %x, i32 1
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
   %x01 = fsub double %x0, %x1
   ret double %x01
 }
 
-define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
+define double @extract_extract_v8f64_fsub_f64_commute(<8 x double> %x) {
+; SSE3-LABEL: extract_extract_v8f64_fsub_f64_commute:
 ; SSE3:       # %bb.0:
 ; SSE3-NEXT:    movapd %xmm0, %xmm1
 ; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
@@ -883,15 +1051,83 @@ define double @extract_extract_v4f64_fsu
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64_commute:
+; AVX-LABEL: extract_extract_v8f64_fsub_f64_commute:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %x0 = extractelement <4 x double> %x, i32 0
-  %x1 = extractelement <4 x double> %x, i32 1
+  %x0 = extractelement <8 x double> %x, i32 0
+  %x1 = extractelement <8 x double> %x, i32 1
   %x01 = fsub double %x1, %x0
   ret double %x01
 }
 
+; Check output when one or both extracts have extra uses.
+
+define float @extract_extract_v4f32_fadd_f32_uses1(<4 x float> %x, float* %p) {
+; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movss %xmm0, (%rdi)
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    addss %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  store float %x0, float* %p
+  %x1 = extractelement <4 x float> %x, i32 1
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v4f32_fadd_f32_uses2(<4 x float> %x, float* %p) {
+; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    movss %xmm1, (%rdi)
+; SSE3-NEXT:    addss %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vmovss %xmm1, (%rdi)
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  %x1 = extractelement <4 x float> %x, i32 1
+  store float %x1, float* %p
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+
+define float @extract_extract_v4f32_fadd_f32_uses3(<4 x float> %x, float* %p1, float* %p2) {
+; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses3:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    movss %xmm0, (%rdi)
+; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT:    movss %xmm1, (%rsi)
+; SSE3-NEXT:    addss %xmm1, %xmm0
+; SSE3-NEXT:    retq
+;
+; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses3:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vmovss %xmm1, (%rsi)
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 0
+  store float %x0, float* %p1
+  %x1 = extractelement <4 x float> %x, i32 1
+  store float %x1, float* %p2
+  %x01 = fadd float %x0, %x1
+  ret float %x01
+}
+