[llvm] r359671 - [X86][SSE] Add scalar horizontal add/sub tests for element extractions from upper lanes
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed May 1 04:17:11 PDT 2019
Author: rksimon
Date: Wed May 1 04:17:11 2019
New Revision: 359671
URL: http://llvm.org/viewvc/llvm-project?rev=359671&view=rev
Log:
[X86][SSE] Add scalar horizontal add/sub tests for element extractions from upper lanes
As suggested on D61263
Modified:
llvm/trunk/test/CodeGen/X86/haddsub.ll
llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll
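For context, every new test in this patch has the same shape: extract two neighbouring elements from the upper 128-bit half of a 256-bit vector and combine them with a scalar add or sub. A minimal LLVM IR sketch of that pattern follows; the function name is illustrative only, and the suggestion (per the D61263 review this was requested on) is presumably to see whether this can lower to a subvector extract plus a horizontal op rather than shuffles plus a scalar add:

define float @upper_lane_pair_fadd(<8 x float> %x) {
  ; elements 6 and 7 live in the upper 128-bit half
  ; (xmm1 under SSE, the high lane of ymm0 under AVX)
  %a = extractelement <8 x float> %x, i32 6
  %b = extractelement <8 x float> %x, i32 7
  ; presumed goal: vextractf128 + haddps instead of shuffles + addss
  %s = fadd float %a, %b
  ret float %s
}

As the AVX CHECK lines in the diff below show, the current codegen for these cases extracts the upper xmm and then uses per-element shuffles plus a scalar add/sub rather than a horizontal op.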
Modified: llvm/trunk/test/CodeGen/X86/haddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub.ll?rev=359671&r1=359670&r2=359671&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub.ll Wed May 1 04:17:11 2019
@@ -940,6 +940,29 @@ define float @extract_extract23_v8f32_fa
ret float %x01
}
+define float @extract_extract67_v8f32_fadd_f32(<8 x float> %x) {
+; SSE3-LABEL: extract_extract67_v8f32_fadd_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract67_v8f32_fadd_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x float> %x, i32 6
+ %x1 = extractelement <8 x float> %x, i32 7
+ %x01 = fadd float %x0, %x1
+ ret float %x01
+}
+
define float @extract_extract01_v8f32_fadd_f32_commute(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; SSE3-SLOW: # %bb.0:
@@ -992,6 +1015,29 @@ define float @extract_extract23_v8f32_fa
ret float %x01
}
+define float @extract_extract67_v8f32_fadd_f32_commute(<8 x float> %x) {
+; SSE3-LABEL: extract_extract67_v8f32_fadd_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract67_v8f32_fadd_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x float> %x, i32 6
+ %x1 = extractelement <8 x float> %x, i32 7
+ %x01 = fadd float %x1, %x0
+ ret float %x01
+}
+
define double @extract_extract01_v4f64_fadd_f64(<4 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f64_fadd_f64:
; SSE3-SLOW: # %bb.0:
@@ -1024,6 +1070,33 @@ define double @extract_extract01_v4f64_f
ret double %x01
}
+define double @extract_extract23_v4f64_fadd_f64(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract23_v4f64_fadd_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE3-SLOW-NEXT: addsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract23_v4f64_fadd_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: movapd %xmm1, %xmm0
+; SSE3-FAST-NEXT: haddpd %xmm1, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4f64_fadd_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x double> %x, i32 2
+ %x1 = extractelement <4 x double> %x, i32 3
+ %x01 = fadd double %x0, %x1
+ ret double %x01
+}
+
define double @extract_extract01_v4f64_fadd_f64_commute(<4 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; SSE3-SLOW: # %bb.0:
@@ -1056,6 +1129,33 @@ define double @extract_extract01_v4f64_f
ret double %x01
}
+define double @extract_extract23_v4f64_fadd_f64_commute(<4 x double> %x) {
+; SSE3-SLOW-LABEL: extract_extract23_v4f64_fadd_f64_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE3-SLOW-NEXT: addsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract23_v4f64_fadd_f64_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: movapd %xmm1, %xmm0
+; SSE3-FAST-NEXT: haddpd %xmm1, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-LABEL: extract_extract23_v4f64_fadd_f64_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x double> %x, i32 2
+ %x1 = extractelement <4 x double> %x, i32 3
+ %x01 = fadd double %x1, %x0
+ ret double %x01
+}
+
define float @extract_extract01_v8f32_fsub_f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f32_fsub_f32:
; SSE3-SLOW: # %bb.0:
@@ -1108,6 +1208,33 @@ define float @extract_extract23_v8f32_fs
%x01 = fsub float %x0, %x1
ret float %x01
}
+
+define float @extract_extract45_v8f32_fsub_f32(<8 x float> %x) {
+; SSE3-SLOW-LABEL: extract_extract45_v8f32_fsub_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movaps %xmm1, %xmm0
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract45_v8f32_fsub_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: movaps %xmm1, %xmm0
+; SSE3-FAST-NEXT: hsubps %xmm1, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-LABEL: extract_extract45_v8f32_fsub_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x float> %x, i32 4
+ %x1 = extractelement <8 x float> %x, i32 5
+ %x01 = fsub float %x0, %x1
+ ret float %x01
+}
; Negative test...or get hoppy and negate?
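The "; Negative test...or get hoppy and negate?" comment above is pre-existing context in the file, not part of this patch; the same comment also closes the new hunks in phaddsub-extract.ll below. It presumably refers to the commuted fsub/sub cases that follow it in the full test files, where the subtraction order is reversed relative to what hsubps/phsubd compute, so a horizontal op would additionally need a negate. A rough sketch of that shape, assuming the f32 case and an illustrative function name:

define float @fsub_commute_negative(<8 x float> %x) {
  %x0 = extractelement <8 x float> %x, i32 4
  %x1 = extractelement <8 x float> %x, i32 5
  ; hsubps would compute %x0 - %x1; this wants %x1 - %x0,
  ; which would need an extra negation ("get hoppy and negate")
  %d = fsub float %x1, %x0
  ret float %d
}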
Modified: llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll?rev=359671&r1=359670&r2=359671&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll Wed May 1 04:17:11 2019
@@ -496,6 +496,30 @@ define i32 @extract_extract23_v8i32_add_
ret i32 %x01
}
+define i32 @extract_extract67_v8i32_add_i32(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract67_v8i32_add_i32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %ecx
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract67_v8i32_add_i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vextractps $2, %xmm0, %ecx
+; AVX-NEXT: vextractps $3, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i32> %x, i32 6
+ %x1 = extractelement <8 x i32> %x, i32 7
+ %x01 = add i32 %x0, %x1
+ ret i32 %x01
+}
+
define i32 @extract_extract01_v8i32_add_i32_commute(<8 x i32> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8i32_add_i32_commute:
; SSE3-SLOW: # %bb.0:
@@ -554,6 +578,30 @@ define i32 @extract_extract23_v8i32_add_
ret i32 %x01
}
+define i32 @extract_extract67_v8i32_add_i32_commute(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract67_v8i32_add_i32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %ecx
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: addl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract67_v8i32_add_i32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vextractps $2, %xmm0, %ecx
+; AVX-NEXT: vextractps $3, %xmm0, %eax
+; AVX-NEXT: addl %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i32> %x, i32 6
+ %x1 = extractelement <8 x i32> %x, i32 7
+ %x01 = add i32 %x1, %x0
+ ret i32 %x01
+}
+
define i16 @extract_extract01_v16i16_add_i16(<16 x i16> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v16i16_add_i16:
; SSE3-SLOW: # %bb.0:
@@ -615,6 +663,87 @@ define i16 @extract_extract23_v16i16_add
ret i16 %x01
}
+define i16 @extract_extract89_v16i16_add_i16(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract89_v16i16_add_i16:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm1, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm1, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract89_v16i16_add_i16:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm1, %xmm1
+; SSE3-FAST-NEXT: movd %xmm1, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX1-SLOW-LABEL: extract_extract89_v16i16_add_i16:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX1-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-SLOW-NEXT: addl %ecx, %eax
+; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: extract_extract89_v16i16_add_i16:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vmovd %xmm0, %ecx
+; AVX1-FAST-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-FAST-NEXT: addl %ecx, %eax
+; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-SLOW-LABEL: extract_extract89_v16i16_add_i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX2-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-SLOW-NEXT: addl %ecx, %eax
+; AVX2-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: extract_extract89_v16i16_add_i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-NEXT: vmovd %xmm0, %ecx
+; AVX2-FAST-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-FAST-NEXT: addl %ecx, %eax
+; AVX2-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-SLOW-LABEL: extract_extract89_v16i16_add_i16:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX512-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX512-SLOW-NEXT: addl %ecx, %eax
+; AVX512-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-SLOW-NEXT: vzeroupper
+; AVX512-SLOW-NEXT: retq
+;
+; AVX512-FAST-LABEL: extract_extract89_v16i16_add_i16:
+; AVX512-FAST: # %bb.0:
+; AVX512-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT: vmovd %xmm0, %ecx
+; AVX512-FAST-NEXT: vpextrw $1, %xmm0, %eax
+; AVX512-FAST-NEXT: addl %ecx, %eax
+; AVX512-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-FAST-NEXT: vzeroupper
+; AVX512-FAST-NEXT: retq
+ %x0 = extractelement <16 x i16> %x, i32 8
+ %x1 = extractelement <16 x i16> %x, i32 9
+ %x01 = add i16 %x0, %x1
+ ret i16 %x01
+}
+
define i16 @extract_extract01_v16i16_add_i16_commute(<16 x i16> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v16i16_add_i16_commute:
; SSE3-SLOW: # %bb.0:
@@ -676,6 +805,87 @@ define i16 @extract_extract45_v16i16_add
ret i16 %x01
}
+define i16 @extract_extract89_v16i16_add_i16_commute(<16 x i16> %x) {
+; SSE3-SLOW-LABEL: extract_extract89_v16i16_add_i16_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm1, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm1, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract89_v16i16_add_i16_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm1, %xmm1
+; SSE3-FAST-NEXT: movd %xmm1, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX1-SLOW-LABEL: extract_extract89_v16i16_add_i16_commute:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX1-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-SLOW-NEXT: addl %ecx, %eax
+; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: extract_extract89_v16i16_add_i16_commute:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vmovd %xmm0, %ecx
+; AVX1-FAST-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-FAST-NEXT: addl %ecx, %eax
+; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-SLOW-LABEL: extract_extract89_v16i16_add_i16_commute:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX2-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-SLOW-NEXT: addl %ecx, %eax
+; AVX2-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: extract_extract89_v16i16_add_i16_commute:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-NEXT: vmovd %xmm0, %ecx
+; AVX2-FAST-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-FAST-NEXT: addl %ecx, %eax
+; AVX2-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-SLOW-LABEL: extract_extract89_v16i16_add_i16_commute:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX512-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX512-SLOW-NEXT: addl %ecx, %eax
+; AVX512-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-SLOW-NEXT: vzeroupper
+; AVX512-SLOW-NEXT: retq
+;
+; AVX512-FAST-LABEL: extract_extract89_v16i16_add_i16_commute:
+; AVX512-FAST: # %bb.0:
+; AVX512-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT: vmovd %xmm0, %ecx
+; AVX512-FAST-NEXT: vpextrw $1, %xmm0, %eax
+; AVX512-FAST-NEXT: addl %ecx, %eax
+; AVX512-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-FAST-NEXT: vzeroupper
+; AVX512-FAST-NEXT: retq
+ %x0 = extractelement <16 x i16> %x, i32 8
+ %x1 = extractelement <16 x i16> %x, i32 9
+ %x01 = add i16 %x1, %x0
+ ret i16 %x01
+}
+
define i32 @extract_extract01_v8i32_sub_i32(<8 x i32> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8i32_sub_i32:
; SSE3-SLOW: # %bb.0:
@@ -733,6 +943,30 @@ define i32 @extract_extract23_v8i32_sub_
%x01 = sub i32 %x0, %x1
ret i32 %x01
}
+
+define i32 @extract_extract67_v8i32_sub_i32(<8 x i32> %x) {
+; SSE3-LABEL: extract_extract67_v8i32_sub_i32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-NEXT: movd %xmm0, %eax
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-NEXT: movd %xmm0, %ecx
+; SSE3-NEXT: subl %ecx, %eax
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract67_v8i32_sub_i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vextractps $2, %xmm0, %eax
+; AVX-NEXT: vextractps $3, %xmm0, %ecx
+; AVX-NEXT: subl %ecx, %eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x i32> %x, i32 6
+ %x1 = extractelement <8 x i32> %x, i32 7
+ %x01 = sub i32 %x0, %x1
+ ret i32 %x01
+}
; Negative test...or get hoppy and negate?