[llvm] r349928 - [x86] move misplaced tests; NFC
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 21 09:06:43 PST 2018
Author: spatel
Date: Fri Dec 21 09:06:43 2018
New Revision: 349928
URL: http://llvm.org/viewvc/llvm-project?rev=349928&view=rev
Log:
[x86] move misplaced tests; NFC
The FP tests added in rL349923 were placed in the integer test file (phaddsub.ll) instead of the FP test file (haddsub.ll); move them to where they belong.
Modified:
llvm/trunk/test/CodeGen/X86/haddsub.ll
llvm/trunk/test/CodeGen/X86/phaddsub.ll
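
For reference, the moved tests exercise scalar FP extract+add/sub patterns. The IR below is copied verbatim from the first hunk of the diff that follows (only the assembly CHECK lines are omitted here):

define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  %x01 = fadd float %x0, %x1
  ret float %x01
}

These are floating-point patterns, so they belong in haddsub.ll with the other FP horizontal-op tests rather than in phaddsub.ll, which covers the integer phadd/phsub forms.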
Modified: llvm/trunk/test/CodeGen/X86/haddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub.ll?rev=349928&r1=349927&r2=349928&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub.ll Fri Dec 21 09:06:43 2018
@@ -583,3 +583,427 @@ define <2 x float> @haddps_v2f32(<4 x fl
ret <2 x float> %res1
}
+define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
+; SSSE3-LABEL: extract_extract_v4f32_fadd_f32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: addss %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x float> %x, i32 0
+ %x1 = extractelement <4 x float> %x, i32 1
+ %x01 = fadd float %x0, %x1
+ ret float %x01
+}
+
+define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
+; SSSE3-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: addss %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x float> %x, i32 0
+ %x1 = extractelement <4 x float> %x, i32 1
+ %x01 = fadd float %x1, %x0
+ ret float %x01
+}
+
+define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
+; SSSE3-LABEL: extract_extract_v8f32_fadd_f32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: addss %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x float> %x, i32 0
+ %x1 = extractelement <8 x float> %x, i32 1
+ %x01 = fadd float %x0, %x1
+ ret float %x01
+}
+
+define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
+; SSSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: addss %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: addss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x float> %x, i32 0
+ %x1 = extractelement <8 x float> %x, i32 1
+ %x01 = fadd float %x1, %x0
+ ret float %x01
+}
+
+define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
+; SSSE3-LABEL: extract_extract_v4f32_fsub_f32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: subss %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: subss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x float> %x, i32 0
+ %x1 = extractelement <4 x float> %x, i32 1
+ %x01 = fsub float %x0, %x1
+ ret float %x01
+}
+
+define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
+; SSSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: subss %xmm0, %xmm1
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: subss %xmm0, %xmm1
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x float> %x, i32 0
+ %x1 = extractelement <4 x float> %x, i32 1
+ %x01 = fsub float %x1, %x0
+ ret float %x01
+}
+
+define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
+; SSSE3-LABEL: extract_extract_v8f32_fsub_f32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: subss %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: subss %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x float> %x, i32 0
+ %x1 = extractelement <8 x float> %x, i32 1
+ %x01 = fsub float %x0, %x1
+ ret float %x01
+}
+
+define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
+; SSSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSSE3-NEXT: subss %xmm0, %xmm1
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-NEXT: subss %xmm0, %xmm1
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v8f32_fsub_f32_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <8 x float> %x, i32 0
+ %x1 = extractelement <8 x float> %x, i32 1
+ %x01 = fsub float %x1, %x0
+ ret float %x01
+}
+
+define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
+; SSSE3-LABEL: extract_extract_v2f64_fadd_f64:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: addsd %xmm0, %xmm1
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: addsd %xmm0, %xmm1
+; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <2 x double> %x, i32 0
+ %x1 = extractelement <2 x double> %x, i32 1
+ %x01 = fadd double %x0, %x1
+ ret double %x01
+}
+
+define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
+; SSSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: addsd %xmm0, %xmm1
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: addsd %xmm0, %xmm1
+; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <2 x double> %x, i32 0
+ %x1 = extractelement <2 x double> %x, i32 1
+ %x01 = fadd double %x1, %x0
+ ret double %x01
+}
+
+define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
+; SSSE3-LABEL: extract_extract_v4f64_fadd_f64:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: addsd %xmm0, %xmm1
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: addsd %xmm0, %xmm1
+; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x double> %x, i32 0
+ %x1 = extractelement <4 x double> %x, i32 1
+ %x01 = fadd double %x0, %x1
+ ret double %x01
+}
+
+define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
+; SSSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: addsd %xmm0, %xmm1
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: addsd %xmm0, %xmm1
+; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x double> %x, i32 0
+ %x1 = extractelement <4 x double> %x, i32 1
+ %x01 = fadd double %x1, %x0
+ ret double %x01
+}
+
+define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
+; SSSE3-LABEL: extract_extract_v2f64_fsub_f64:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: subsd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: subsd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <2 x double> %x, i32 0
+ %x1 = extractelement <2 x double> %x, i32 1
+ %x01 = fsub double %x0, %x1
+ ret double %x01
+}
+
+define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
+; SSSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: subsd %xmm0, %xmm1
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: subsd %xmm0, %xmm1
+; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %x0 = extractelement <2 x double> %x, i32 0
+ %x1 = extractelement <2 x double> %x, i32 1
+ %x01 = fsub double %x1, %x0
+ ret double %x01
+}
+
+define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
+; SSSE3-LABEL: extract_extract_v4f64_fsub_f64:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: subsd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: subsd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x double> %x, i32 0
+ %x1 = extractelement <4 x double> %x, i32 1
+ %x01 = fsub double %x0, %x1
+ ret double %x01
+}
+
+define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
+; SSSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movapd %xmm0, %xmm1
+; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3-NEXT: subsd %xmm0, %xmm1
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movapd %xmm0, %xmm1
+; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-NEXT: subsd %xmm0, %xmm1
+; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; AVX-LABEL: extract_extract_v4f64_fsub_f64_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %x0 = extractelement <4 x double> %x, i32 0
+ %x1 = extractelement <4 x double> %x, i32 1
+ %x01 = fsub double %x1, %x0
+ ret double %x01
+}
+
Modified: llvm/trunk/test/CodeGen/X86/phaddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/phaddsub.ll?rev=349928&r1=349927&r2=349928&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/phaddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/phaddsub.ll Fri Dec 21 09:06:43 2018
@@ -757,315 +757,3 @@ define <8 x i16> @phaddw_single_source6(
ret <8 x i16> %shuffle2
}
-define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
-; SSSE3-LABEL: extract_extract_v4f32_fadd_f32:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: addss %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <4 x float> %x, i32 0
- %x1 = extractelement <4 x float> %x, i32 1
- %x01 = fadd float %x0, %x1
- ret float %x01
-}
-
-define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
-; SSSE3-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: addss %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <4 x float> %x, i32 0
- %x1 = extractelement <4 x float> %x, i32 1
- %x01 = fadd float %x1, %x0
- ret float %x01
-}
-
-define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
-; SSSE3-LABEL: extract_extract_v8f32_fadd_f32:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: addss %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <8 x float> %x, i32 0
- %x1 = extractelement <8 x float> %x, i32 1
- %x01 = fadd float %x0, %x1
- ret float %x01
-}
-
-define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
-; SSSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: addss %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <8 x float> %x, i32 0
- %x1 = extractelement <8 x float> %x, i32 1
- %x01 = fadd float %x1, %x0
- ret float %x01
-}
-
-define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
-; SSSE3-LABEL: extract_extract_v4f32_fsub_f32:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: subss %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <4 x float> %x, i32 0
- %x1 = extractelement <4 x float> %x, i32 1
- %x01 = fsub float %x0, %x1
- ret float %x01
-}
-
-define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
-; SSSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: subss %xmm0, %xmm1
-; SSSE3-NEXT: movaps %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <4 x float> %x, i32 0
- %x1 = extractelement <4 x float> %x, i32 1
- %x01 = fsub float %x1, %x0
- ret float %x01
-}
-
-define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
-; SSSE3-LABEL: extract_extract_v8f32_fsub_f32:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: subss %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8f32_fsub_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <8 x float> %x, i32 0
- %x1 = extractelement <8 x float> %x, i32 1
- %x01 = fsub float %x0, %x1
- ret float %x01
-}
-
-define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
-; SSSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSSE3-NEXT: subss %xmm0, %xmm1
-; SSSE3-NEXT: movaps %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8f32_fsub_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <8 x float> %x, i32 0
- %x1 = extractelement <8 x float> %x, i32 1
- %x01 = fsub float %x1, %x0
- ret float %x01
-}
-
-define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
-; SSSE3-LABEL: extract_extract_v2f64_fadd_f64:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: addsd %xmm0, %xmm1
-; SSSE3-NEXT: movapd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <2 x double> %x, i32 0
- %x1 = extractelement <2 x double> %x, i32 1
- %x01 = fadd double %x0, %x1
- ret double %x01
-}
-
-define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
-; SSSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: addsd %xmm0, %xmm1
-; SSSE3-NEXT: movapd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <2 x double> %x, i32 0
- %x1 = extractelement <2 x double> %x, i32 1
- %x01 = fadd double %x1, %x0
- ret double %x01
-}
-
-define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
-; SSSE3-LABEL: extract_extract_v4f64_fadd_f64:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: addsd %xmm0, %xmm1
-; SSSE3-NEXT: movapd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <4 x double> %x, i32 0
- %x1 = extractelement <4 x double> %x, i32 1
- %x01 = fadd double %x0, %x1
- ret double %x01
-}
-
-define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
-; SSSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: addsd %xmm0, %xmm1
-; SSSE3-NEXT: movapd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <4 x double> %x, i32 0
- %x1 = extractelement <4 x double> %x, i32 1
- %x01 = fadd double %x1, %x0
- ret double %x01
-}
-
-define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
-; SSSE3-LABEL: extract_extract_v2f64_fsub_f64:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: subsd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <2 x double> %x, i32 0
- %x1 = extractelement <2 x double> %x, i32 1
- %x01 = fsub double %x0, %x1
- ret double %x01
-}
-
-define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
-; SSSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: subsd %xmm0, %xmm1
-; SSSE3-NEXT: movapd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
- %x0 = extractelement <2 x double> %x, i32 0
- %x1 = extractelement <2 x double> %x, i32 1
- %x01 = fsub double %x1, %x0
- ret double %x01
-}
-
-define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
-; SSSE3-LABEL: extract_extract_v4f64_fsub_f64:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: subsd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <4 x double> %x, i32 0
- %x1 = extractelement <4 x double> %x, i32 1
- %x01 = fsub double %x0, %x1
- ret double %x01
-}
-
-define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
-; SSSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movapd %xmm0, %xmm1
-; SSSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSSE3-NEXT: subsd %xmm0, %xmm1
-; SSSE3-NEXT: movapd %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
- %x0 = extractelement <4 x double> %x, i32 0
- %x1 = extractelement <4 x double> %x, i32 1
- %x01 = fsub double %x1, %x0
- ret double %x01
-}
-