[llvm] r320951 - [X86] Add test cases showing where a build vector of extracts and inserts should be turned into fmsubadd.
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Sun Dec 17 10:31:36 PST 2017
Author: ctopper
Date: Sun Dec 17 10:31:36 2017
New Revision: 320951
URL: http://llvm.org/viewvc/llvm-project?rev=320951&view=rev
Log:
[X86] Add test cases showing where a build vector of extracts and inserts should be turned into fmsubadd.
This is a follow-up to the fmaddsub support added in r320950. Hopefully lowering can be fixed in the future to handle this fmsubadd pattern as well.
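To make the pattern concrete: each test below multiplies two vectors, then adds B into the even lanes and subtracts B from the odd lanes through scalar extracts and inserts. A minimal sketch of the <2 x double> case, with the single instruction it could ideally lower to (the instruction and register assignment shown are illustrative, assuming an FMA3 target):

  %A  = fmul <2 x double> %C, %D
  %A0 = extractelement <2 x double> %A, i32 0
  %B0 = extractelement <2 x double> %B, i32 0
  %e0 = fadd double %A0, %B0          ; even lane: add
  %A1 = extractelement <2 x double> %A, i32 1
  %B1 = extractelement <2 x double> %B, i32 1
  %e1 = fsub double %A1, %B1          ; odd lane: subtract
  %r0 = insertelement <2 x double> undef, double %e0, i32 0
  %r  = insertelement <2 x double> %r0, double %e1, i32 1

  ; hoped-for lowering (illustrative):
  ;   vfmsubadd213pd %xmm2, %xmm1, %xmm0
  ;   (xmm0 = xmm1 * xmm0, then +xmm2 in even lanes, -xmm2 in odd lanes)

Note that the %subN/%addN value names in the tests are carried over from the fmaddsub tests, so here the %subN values actually hold fadd results and the %addN values hold fsub results.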
Modified:
llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll
Modified: llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll?rev=320951&r1=320950&r2=320951&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmaddsub-combine.ll Sun Dec 17 10:31:36 2017
@@ -396,4 +396,680 @@ bb:
ret <8 x double> %vecinsert8
}
+define <4 x float> @buildvector_mul_subadd_ps128(<4 x float> %C, <4 x float> %D, <4 x float> %B) #0 {
+; FMA3_256-LABEL: buildvector_mul_subadd_ps128:
+; FMA3_256: # %bb.0: # %bb
+; FMA3_256-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; FMA3_256-NEXT: vaddss %xmm2, %xmm0, %xmm1
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; FMA3_256-NEXT: vaddss %xmm4, %xmm3, %xmm3
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; FMA3_256-NEXT: vsubss %xmm5, %xmm4, %xmm4
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; FMA3_256-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2,3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: buildvector_mul_subadd_ps128:
+; FMA3_512: # %bb.0: # %bb
+; FMA3_512-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; FMA3_512-NEXT: vaddss %xmm2, %xmm0, %xmm1
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; FMA3_512-NEXT: vaddss %xmm4, %xmm3, %xmm3
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; FMA3_512-NEXT: vsubss %xmm5, %xmm4, %xmm4
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2,3]
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; FMA3_512-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: buildvector_mul_subadd_ps128:
+; FMA4: # %bb.0: # %bb
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vaddss %xmm2, %xmm0, %xmm1
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; FMA4-NEXT: vaddss %xmm4, %xmm3, %xmm3
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; FMA4-NEXT: vsubss %xmm5, %xmm4, %xmm4
+; FMA4-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA4-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; FMA4-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; FMA4-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2,3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; FMA4-NEXT: retq
+bb:
+ %A = fmul <4 x float> %C, %D
+ %A0 = extractelement <4 x float> %A, i32 0
+ %B0 = extractelement <4 x float> %B, i32 0
+ %sub0 = fadd float %A0, %B0
+ %A2 = extractelement <4 x float> %A, i32 2
+ %B2 = extractelement <4 x float> %B, i32 2
+ %sub2 = fadd float %A2, %B2
+ %A1 = extractelement <4 x float> %A, i32 1
+ %B1 = extractelement <4 x float> %B, i32 1
+ %add1 = fsub float %A1, %B1
+ %A3 = extractelement <4 x float> %A, i32 3
+ %B3 = extractelement <4 x float> %B, i32 3
+ %add3 = fsub float %A3, %B3
+ %vecinsert1 = insertelement <4 x float> undef, float %sub0, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add1, i32 1
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub2, i32 2
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %add3, i32 3
+ ret <4 x float> %vecinsert4
+}
+
+define <2 x double> @buildvector_mul_subadd_pd128(<2 x double> %C, <2 x double> %D, <2 x double> %B) #0 {
+; FMA3-LABEL: buildvector_mul_subadd_pd128:
+; FMA3: # %bb.0: # %bb
+; FMA3-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; FMA3-NEXT: vaddsd %xmm2, %xmm0, %xmm1
+; FMA3-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; FMA3-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; FMA3-NEXT: vsubsd %xmm2, %xmm0, %xmm0
+; FMA3-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: buildvector_mul_subadd_pd128:
+; FMA4: # %bb.0: # %bb
+; FMA4-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vaddsd %xmm2, %xmm0, %xmm1
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; FMA4-NEXT: vsubsd %xmm2, %xmm0, %xmm0
+; FMA4-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; FMA4-NEXT: retq
+bb:
+ %A = fmul <2 x double> %C, %D
+ %A0 = extractelement <2 x double> %A, i32 0
+ %B0 = extractelement <2 x double> %B, i32 0
+ %sub0 = fadd double %A0, %B0
+ %A1 = extractelement <2 x double> %A, i32 1
+ %B1 = extractelement <2 x double> %B, i32 1
+ %add1 = fsub double %A1, %B1
+ %vecinsert1 = insertelement <2 x double> undef, double %sub0, i32 0
+ %vecinsert2 = insertelement <2 x double> %vecinsert1, double %add1, i32 1
+ ret <2 x double> %vecinsert2
+}
+
+define <8 x float> @buildvector_mul_subadd_ps256(<8 x float> %C, <8 x float> %D, <8 x float> %B) #0 {
+; FMA3_256-LABEL: buildvector_mul_subadd_ps256:
+; FMA3_256: # %bb.0: # %bb
+; FMA3_256-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; FMA3_256-NEXT: vaddss %xmm2, %xmm0, %xmm8
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; FMA3_256-NEXT: vaddss %xmm4, %xmm3, %xmm9
+; FMA3_256-NEXT: vextractf128 $1, %ymm0, %xmm4
+; FMA3_256-NEXT: vextractf128 $1, %ymm2, %xmm5
+; FMA3_256-NEXT: vaddss %xmm5, %xmm4, %xmm6
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm1 = xmm5[1,0]
+; FMA3_256-NEXT: vaddss %xmm1, %xmm7, %xmm1
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; FMA3_256-NEXT: vsubss %xmm3, %xmm7, %xmm3
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; FMA3_256-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; FMA3_256-NEXT: vsubss %xmm7, %xmm2, %xmm2
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; FMA3_256-NEXT: vsubss %xmm5, %xmm4, %xmm4
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[2,3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm2 = xmm8[0],xmm3[0],xmm8[2,3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; FMA3_256-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: buildvector_mul_subadd_ps256:
+; FMA3_512: # %bb.0: # %bb
+; FMA3_512-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; FMA3_512-NEXT: vaddss %xmm2, %xmm0, %xmm1
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; FMA3_512-NEXT: vaddss %xmm4, %xmm3, %xmm3
+; FMA3_512-NEXT: vextractf128 $1, %ymm0, %xmm4
+; FMA3_512-NEXT: vextractf128 $1, %ymm2, %xmm5
+; FMA3_512-NEXT: vaddss %xmm5, %xmm4, %xmm8
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; FMA3_512-NEXT: vaddss %xmm6, %xmm7, %xmm9
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; FMA3_512-NEXT: vsubss %xmm6, %xmm7, %xmm6
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[2,3]
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; FMA3_512-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm5[1,1,3,3]
+; FMA3_512-NEXT: vsubss %xmm2, %xmm1, %xmm1
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[2,3]
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm9[0],xmm1[3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm2 = xmm4[3,1,2,3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm3 = xmm5[3,1,2,3]
+; FMA3_512-NEXT: vsubss %xmm3, %xmm2, %xmm2
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; FMA3_512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: buildvector_mul_subadd_ps256:
+; FMA4: # %bb.0: # %bb
+; FMA4-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; FMA4-NEXT: vaddss %xmm2, %xmm0, %xmm8
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; FMA4-NEXT: vaddss %xmm4, %xmm3, %xmm9
+; FMA4-NEXT: vextractf128 $1, %ymm0, %xmm4
+; FMA4-NEXT: vextractf128 $1, %ymm2, %xmm5
+; FMA4-NEXT: vaddss %xmm5, %xmm4, %xmm6
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm1 = xmm5[1,0]
+; FMA4-NEXT: vaddss %xmm1, %xmm7, %xmm1
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; FMA4-NEXT: vsubss %xmm3, %xmm7, %xmm3
+; FMA4-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA4-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; FMA4-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; FMA4-NEXT: vsubss %xmm7, %xmm2, %xmm2
+; FMA4-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; FMA4-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; FMA4-NEXT: vsubss %xmm5, %xmm4, %xmm4
+; FMA4-NEXT: vinsertps {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[2,3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm2 = xmm8[0],xmm3[0],xmm8[2,3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; FMA4-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; FMA4-NEXT: retq
+bb:
+ %A = fmul <8 x float> %C, %D
+ %A0 = extractelement <8 x float> %A, i32 0
+ %B0 = extractelement <8 x float> %B, i32 0
+ %sub0 = fadd float %A0, %B0
+ %A2 = extractelement <8 x float> %A, i32 2
+ %B2 = extractelement <8 x float> %B, i32 2
+ %sub2 = fadd float %A2, %B2
+ %A4 = extractelement <8 x float> %A, i32 4
+ %B4 = extractelement <8 x float> %B, i32 4
+ %sub4 = fadd float %A4, %B4
+ %A6 = extractelement <8 x float> %A, i32 6
+ %B6 = extractelement <8 x float> %B, i32 6
+ %sub6 = fadd float %A6, %B6
+ %A1 = extractelement <8 x float> %A, i32 1
+ %B1 = extractelement <8 x float> %B, i32 1
+ %add1 = fsub float %A1, %B1
+ %A3 = extractelement <8 x float> %A, i32 3
+ %B3 = extractelement <8 x float> %B, i32 3
+ %add3 = fsub float %A3, %B3
+ %A5 = extractelement <8 x float> %A, i32 5
+ %B5 = extractelement <8 x float> %B, i32 5
+ %add5 = fsub float %A5, %B5
+ %A7 = extractelement <8 x float> %A, i32 7
+ %B7 = extractelement <8 x float> %B, i32 7
+ %add7 = fsub float %A7, %B7
+ %vecinsert1 = insertelement <8 x float> undef, float %sub0, i32 0
+ %vecinsert2 = insertelement <8 x float> %vecinsert1, float %add1, i32 1
+ %vecinsert3 = insertelement <8 x float> %vecinsert2, float %sub2, i32 2
+ %vecinsert4 = insertelement <8 x float> %vecinsert3, float %add3, i32 3
+ %vecinsert5 = insertelement <8 x float> %vecinsert4, float %sub4, i32 4
+ %vecinsert6 = insertelement <8 x float> %vecinsert5, float %add5, i32 5
+ %vecinsert7 = insertelement <8 x float> %vecinsert6, float %sub6, i32 6
+ %vecinsert8 = insertelement <8 x float> %vecinsert7, float %add7, i32 7
+ ret <8 x float> %vecinsert8
+}
+
+define <4 x double> @buildvector_mul_subadd_pd256(<4 x double> %C, <4 x double> %D, <4 x double> %B) #0 {
+; FMA3-LABEL: buildvector_mul_subadd_pd256:
+; FMA3: # %bb.0: # %bb
+; FMA3-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; FMA3-NEXT: vaddsd %xmm2, %xmm0, %xmm1
+; FMA3-NEXT: vextractf128 $1, %ymm0, %xmm3
+; FMA3-NEXT: vextractf128 $1, %ymm2, %xmm4
+; FMA3-NEXT: vaddsd %xmm4, %xmm3, %xmm5
+; FMA3-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; FMA3-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; FMA3-NEXT: vsubsd %xmm2, %xmm0, %xmm0
+; FMA3-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; FMA3-NEXT: vpermilpd {{.*#+}} xmm3 = xmm4[1,0]
+; FMA3-NEXT: vsubsd %xmm3, %xmm2, %xmm2
+; FMA3-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; FMA3-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; FMA3-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: buildvector_mul_subadd_pd256:
+; FMA4: # %bb.0: # %bb
+; FMA4-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; FMA4-NEXT: vaddsd %xmm2, %xmm0, %xmm1
+; FMA4-NEXT: vextractf128 $1, %ymm0, %xmm3
+; FMA4-NEXT: vextractf128 $1, %ymm2, %xmm4
+; FMA4-NEXT: vaddsd %xmm4, %xmm3, %xmm5
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; FMA4-NEXT: vsubsd %xmm2, %xmm0, %xmm0
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm3 = xmm4[1,0]
+; FMA4-NEXT: vsubsd %xmm3, %xmm2, %xmm2
+; FMA4-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; FMA4-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; FMA4-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; FMA4-NEXT: retq
+bb:
+ %A = fmul <4 x double> %C, %D
+ %A0 = extractelement <4 x double> %A, i32 0
+ %B0 = extractelement <4 x double> %B, i32 0
+ %sub0 = fadd double %A0, %B0
+ %A2 = extractelement <4 x double> %A, i32 2
+ %B2 = extractelement <4 x double> %B, i32 2
+ %sub2 = fadd double %A2, %B2
+ %A1 = extractelement <4 x double> %A, i32 1
+ %B1 = extractelement <4 x double> %B, i32 1
+ %add1 = fsub double %A1, %B1
+ %A3 = extractelement <4 x double> %A, i32 3
+ %B3 = extractelement <4 x double> %B, i32 3
+ %add3 = fsub double %A3, %B3
+ %vecinsert1 = insertelement <4 x double> undef, double %sub0, i32 0
+ %vecinsert2 = insertelement <4 x double> %vecinsert1, double %add1, i32 1
+ %vecinsert3 = insertelement <4 x double> %vecinsert2, double %sub2, i32 2
+ %vecinsert4 = insertelement <4 x double> %vecinsert3, double %add3, i32 3
+ ret <4 x double> %vecinsert4
+}
+
+define <16 x float> @buildvector_mul_subadd_ps512(<16 x float> %C, <16 x float> %D, <16 x float> %B) #0 {
+; FMA3_256-LABEL: buildvector_mul_subadd_ps512:
+; FMA3_256: # %bb.0: # %bb
+; FMA3_256-NEXT: vmulps %ymm3, %ymm1, %ymm3
+; FMA3_256-NEXT: vmulps %ymm2, %ymm0, %ymm8
+; FMA3_256-NEXT: vaddss %xmm4, %xmm8, %xmm0
+; FMA3_256-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm1 = xmm8[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm2 = xmm4[1,0]
+; FMA3_256-NEXT: vaddss %xmm2, %xmm1, %xmm10
+; FMA3_256-NEXT: vextractf128 $1, %ymm8, %xmm0
+; FMA3_256-NEXT: vextractf128 $1, %ymm4, %xmm1
+; FMA3_256-NEXT: vaddss %xmm1, %xmm0, %xmm11
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm7 = xmm1[1,0]
+; FMA3_256-NEXT: vaddss %xmm7, %xmm2, %xmm13
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; FMA3_256-NEXT: vaddss %xmm6, %xmm2, %xmm12
+; FMA3_256-NEXT: vextractf128 $1, %ymm3, %xmm6
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm14 = xmm6[1,0]
+; FMA3_256-NEXT: vextractf128 $1, %ymm5, %xmm7
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm2 = xmm7[1,0]
+; FMA3_256-NEXT: vaddss %xmm2, %xmm14, %xmm14
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm15 = xmm8[1,1,3,3]
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; FMA3_256-NEXT: vsubss %xmm2, %xmm15, %xmm9
+; FMA3_256-NEXT: vaddss %xmm5, %xmm3, %xmm15
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm8 = xmm8[3,1,2,3]
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; FMA3_256-NEXT: vsubss %xmm4, %xmm8, %xmm4
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; FMA3_256-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm2 = xmm5[1,1,3,3]
+; FMA3_256-NEXT: vsubss %xmm2, %xmm1, %xmm1
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm3 = xmm5[3,1,2,3]
+; FMA3_256-NEXT: vsubss %xmm3, %xmm2, %xmm2
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; FMA3_256-NEXT: vmovshdup {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; FMA3_256-NEXT: vsubss %xmm5, %xmm3, %xmm3
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm5 = xmm6[3,1,2,3]
+; FMA3_256-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; FMA3_256-NEXT: vsubss %xmm6, %xmm5, %xmm5
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm6 = xmm11[0,1],xmm13[0],xmm11[3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[0]
+; FMA3_256-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[2,3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0,1],xmm10[0],xmm6[3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[0]
+; FMA3_256-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; FMA3_256-NEXT: vmovsldup {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm14[0],xmm3[3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[0]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm1 = xmm15[0],xmm1[0],xmm15[2,3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm12[0],xmm1[3]
+; FMA3_256-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; FMA3_256-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: buildvector_mul_subadd_ps512:
+; FMA3_512: # %bb.0: # %bb
+; FMA3_512-NEXT: vmulps %zmm1, %zmm0, %zmm0
+; FMA3_512-NEXT: vaddss %xmm2, %xmm0, %xmm8
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; FMA3_512-NEXT: vaddss %xmm4, %xmm3, %xmm10
+; FMA3_512-NEXT: vextractf128 $1, %ymm0, %xmm4
+; FMA3_512-NEXT: vextractf128 $1, %ymm2, %xmm5
+; FMA3_512-NEXT: vaddss %xmm5, %xmm4, %xmm6
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm5[1,0]
+; FMA3_512-NEXT: vaddss %xmm1, %xmm7, %xmm1
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm11 = xmm6[0,1],xmm1[0],xmm6[3]
+; FMA3_512-NEXT: vextractf32x4 $2, %zmm0, %xmm6
+; FMA3_512-NEXT: vextractf32x4 $2, %zmm2, %xmm7
+; FMA3_512-NEXT: vaddss %xmm7, %xmm6, %xmm9
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm6[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm7[1,0]
+; FMA3_512-NEXT: vaddss %xmm1, %xmm3, %xmm12
+; FMA3_512-NEXT: vextractf32x4 $3, %zmm0, %xmm14
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm13 = xmm14[1,0]
+; FMA3_512-NEXT: vextractf32x4 $3, %zmm2, %xmm15
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm15[1,0]
+; FMA3_512-NEXT: vaddss %xmm3, %xmm13, %xmm13
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; FMA3_512-NEXT: vsubss %xmm1, %xmm3, %xmm1
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[2,3]
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm10[0],xmm1[3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; FMA3_512-NEXT: vsubss %xmm2, %xmm0, %xmm0
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm1 = xmm4[3,1,2,3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm2 = xmm5[3,1,2,3]
+; FMA3_512-NEXT: vsubss %xmm2, %xmm1, %xmm1
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm1 = xmm11[0,1,2],xmm1[0]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm7[1,1,3,3]
+; FMA3_512-NEXT: vsubss %xmm3, %xmm2, %xmm2
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0],xmm2[0],xmm9[2,3]
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm12[0],xmm2[3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm3 = xmm6[3,1,2,3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm4 = xmm7[3,1,2,3]
+; FMA3_512-NEXT: vsubss %xmm4, %xmm3, %xmm3
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm14[1,1,3,3]
+; FMA3_512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm15[1,1,3,3]
+; FMA3_512-NEXT: vsubss %xmm4, %xmm3, %xmm3
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm4 = xmm14[3,1,2,3]
+; FMA3_512-NEXT: vpermilps {{.*#+}} xmm5 = xmm15[3,1,2,3]
+; FMA3_512-NEXT: vsubss %xmm5, %xmm4, %xmm4
+; FMA3_512-NEXT: vmovsldup {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm13[0],xmm3[3]
+; FMA3_512-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; FMA3_512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; FMA3_512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; FMA3_512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: buildvector_mul_subadd_ps512:
+; FMA4: # %bb.0: # %bb
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm3
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm8
+; FMA4-NEXT: vaddss %xmm4, %xmm8, %xmm0
+; FMA4-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm1 = xmm8[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm2 = xmm4[1,0]
+; FMA4-NEXT: vaddss %xmm2, %xmm1, %xmm10
+; FMA4-NEXT: vextractf128 $1, %ymm8, %xmm0
+; FMA4-NEXT: vextractf128 $1, %ymm4, %xmm1
+; FMA4-NEXT: vaddss %xmm1, %xmm0, %xmm11
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm7 = xmm1[1,0]
+; FMA4-NEXT: vaddss %xmm7, %xmm2, %xmm13
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; FMA4-NEXT: vaddss %xmm6, %xmm2, %xmm12
+; FMA4-NEXT: vextractf128 $1, %ymm3, %xmm6
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm14 = xmm6[1,0]
+; FMA4-NEXT: vextractf128 $1, %ymm5, %xmm7
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm2 = xmm7[1,0]
+; FMA4-NEXT: vaddss %xmm2, %xmm14, %xmm14
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm15 = xmm8[1,1,3,3]
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; FMA4-NEXT: vsubss %xmm2, %xmm15, %xmm9
+; FMA4-NEXT: vaddss %xmm5, %xmm3, %xmm15
+; FMA4-NEXT: vpermilps {{.*#+}} xmm8 = xmm8[3,1,2,3]
+; FMA4-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; FMA4-NEXT: vsubss %xmm4, %xmm8, %xmm4
+; FMA4-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; FMA4-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; FMA4-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm2 = xmm5[1,1,3,3]
+; FMA4-NEXT: vsubss %xmm2, %xmm1, %xmm1
+; FMA4-NEXT: vpermilps {{.*#+}} xmm2 = xmm3[3,1,2,3]
+; FMA4-NEXT: vpermilps {{.*#+}} xmm3 = xmm5[3,1,2,3]
+; FMA4-NEXT: vsubss %xmm3, %xmm2, %xmm2
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; FMA4-NEXT: vmovshdup {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; FMA4-NEXT: vsubss %xmm5, %xmm3, %xmm3
+; FMA4-NEXT: vpermilps {{.*#+}} xmm5 = xmm6[3,1,2,3]
+; FMA4-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; FMA4-NEXT: vsubss %xmm6, %xmm5, %xmm5
+; FMA4-NEXT: vinsertps {{.*#+}} xmm6 = xmm11[0,1],xmm13[0],xmm11[3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[0]
+; FMA4-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; FMA4-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[2,3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0,1],xmm10[0],xmm6[3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[0]
+; FMA4-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; FMA4-NEXT: vmovsldup {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm14[0],xmm3[3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[0]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm1 = xmm15[0],xmm1[0],xmm15[2,3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm12[0],xmm1[3]
+; FMA4-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; FMA4-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; FMA4-NEXT: retq
+bb:
+ %A = fmul <16 x float> %C, %D
+ %A0 = extractelement <16 x float> %A, i32 0
+ %B0 = extractelement <16 x float> %B, i32 0
+ %sub0 = fadd float %A0, %B0
+ %A2 = extractelement <16 x float> %A, i32 2
+ %B2 = extractelement <16 x float> %B, i32 2
+ %sub2 = fadd float %A2, %B2
+ %A4 = extractelement <16 x float> %A, i32 4
+ %B4 = extractelement <16 x float> %B, i32 4
+ %sub4 = fadd float %A4, %B4
+ %A6 = extractelement <16 x float> %A, i32 6
+ %B6 = extractelement <16 x float> %B, i32 6
+ %sub6 = fadd float %A6, %B6
+ %A8 = extractelement <16 x float> %A, i32 8
+ %B8 = extractelement <16 x float> %B, i32 8
+ %sub8 = fadd float %A8, %B8
+ %A10 = extractelement <16 x float> %A, i32 10
+ %B10 = extractelement <16 x float> %B, i32 10
+ %sub10 = fadd float %A10, %B10
+ %A12 = extractelement <16 x float> %A, i32 12
+ %B12 = extractelement <16 x float> %B, i32 12
+ %sub12 = fadd float %A12, %B12
+ %A14 = extractelement <16 x float> %A, i32 14
+ %B14 = extractelement <16 x float> %B, i32 14
+ %sub14 = fadd float %A14, %B14
+ %A1 = extractelement <16 x float> %A, i32 1
+ %B1 = extractelement <16 x float> %B, i32 1
+ %add1 = fsub float %A1, %B1
+ %A3 = extractelement <16 x float> %A, i32 3
+ %B3 = extractelement <16 x float> %B, i32 3
+ %add3 = fsub float %A3, %B3
+ %A5 = extractelement <16 x float> %A, i32 5
+ %B5 = extractelement <16 x float> %B, i32 5
+ %add5 = fsub float %A5, %B5
+ %A7 = extractelement <16 x float> %A, i32 7
+ %B7 = extractelement <16 x float> %B, i32 7
+ %add7 = fsub float %A7, %B7
+ %A9 = extractelement <16 x float> %A, i32 9
+ %B9 = extractelement <16 x float> %B, i32 9
+ %add9 = fsub float %A9, %B9
+ %A11 = extractelement <16 x float> %A, i32 11
+ %B11 = extractelement <16 x float> %B, i32 11
+ %add11 = fsub float %A11, %B11
+ %A13 = extractelement <16 x float> %A, i32 13
+ %B13 = extractelement <16 x float> %B, i32 13
+ %add13 = fsub float %A13, %B13
+ %A15 = extractelement <16 x float> %A, i32 15
+ %B15 = extractelement <16 x float> %B, i32 15
+ %add15 = fsub float %A15, %B15
+ %vecinsert1 = insertelement <16 x float> undef, float %sub0, i32 0
+ %vecinsert2 = insertelement <16 x float> %vecinsert1, float %add1, i32 1
+ %vecinsert3 = insertelement <16 x float> %vecinsert2, float %sub2, i32 2
+ %vecinsert4 = insertelement <16 x float> %vecinsert3, float %add3, i32 3
+ %vecinsert5 = insertelement <16 x float> %vecinsert4, float %sub4, i32 4
+ ; element 5 is undef
+ %vecinsert7 = insertelement <16 x float> %vecinsert5, float %sub6, i32 6
+ %vecinsert8 = insertelement <16 x float> %vecinsert7, float %add7, i32 7
+ %vecinsert9 = insertelement <16 x float> %vecinsert8, float %sub8, i32 8
+ %vecinsert10 = insertelement <16 x float> %vecinsert9, float %add9, i32 9
+ %vecinsert11 = insertelement <16 x float> %vecinsert10, float %sub10, i32 10
+ %vecinsert12 = insertelement <16 x float> %vecinsert11, float %add11, i32 11
+ ; element 12 is undef
+ %vecinsert14 = insertelement <16 x float> %vecinsert12, float %add13, i32 13
+ %vecinsert15 = insertelement <16 x float> %vecinsert14, float %sub14, i32 14
+ %vecinsert16 = insertelement <16 x float> %vecinsert15, float %add15, i32 15
+ ret <16 x float> %vecinsert16
+}
+
+define <8 x double> @buildvector_mul_subadd_pd512(<8 x double> %C, <8 x double> %D, <8 x double> %B) #0 {
+; FMA3_256-LABEL: buildvector_mul_subadd_pd512:
+; FMA3_256: # %bb.0: # %bb
+; FMA3_256-NEXT: vmulpd %ymm3, %ymm1, %ymm1
+; FMA3_256-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; FMA3_256-NEXT: vaddsd %xmm4, %xmm0, %xmm9
+; FMA3_256-NEXT: vextractf128 $1, %ymm0, %xmm3
+; FMA3_256-NEXT: vextractf128 $1, %ymm4, %xmm6
+; FMA3_256-NEXT: vaddsd %xmm6, %xmm3, %xmm7
+; FMA3_256-NEXT: vaddsd %xmm5, %xmm1, %xmm8
+; FMA3_256-NEXT: vextractf128 $1, %ymm1, %xmm1
+; FMA3_256-NEXT: vextractf128 $1, %ymm5, %xmm5
+; FMA3_256-NEXT: vaddsd %xmm5, %xmm1, %xmm2
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; FMA3_256-NEXT: vsubsd %xmm4, %xmm0, %xmm0
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm4 = xmm6[1,0]
+; FMA3_256-NEXT: vsubsd %xmm4, %xmm3, %xmm3
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; FMA3_256-NEXT: vpermilpd {{.*#+}} xmm4 = xmm5[1,0]
+; FMA3_256-NEXT: vsubsd %xmm4, %xmm1, %xmm1
+; FMA3_256-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm7[0],xmm3[0]
+; FMA3_256-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm9[0],xmm0[0]
+; FMA3_256-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; FMA3_256-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; FMA3_256-NEXT: vinsertf128 $1, %xmm1, %ymm8, %ymm1
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: buildvector_mul_subadd_pd512:
+; FMA3_512: # %bb.0: # %bb
+; FMA3_512-NEXT: vmulpd %zmm1, %zmm0, %zmm0
+; FMA3_512-NEXT: vaddsd %xmm2, %xmm0, %xmm8
+; FMA3_512-NEXT: vextractf128 $1, %ymm0, %xmm3
+; FMA3_512-NEXT: vextractf128 $1, %ymm2, %xmm4
+; FMA3_512-NEXT: vaddsd %xmm4, %xmm3, %xmm5
+; FMA3_512-NEXT: vextractf32x4 $2, %zmm0, %xmm6
+; FMA3_512-NEXT: vextractf32x4 $2, %zmm2, %xmm7
+; FMA3_512-NEXT: vaddsd %xmm7, %xmm6, %xmm9
+; FMA3_512-NEXT: vextractf32x4 $3, %zmm0, %xmm7
+; FMA3_512-NEXT: vextractf32x4 $3, %zmm2, %xmm1
+; FMA3_512-NEXT: vaddsd %xmm1, %xmm7, %xmm6
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; FMA3_512-NEXT: vsubsd %xmm2, %xmm0, %xmm0
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm4[1,0]
+; FMA3_512-NEXT: vsubsd %xmm3, %xmm2, %xmm2
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm7[1,0]
+; FMA3_512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; FMA3_512-NEXT: vsubsd %xmm1, %xmm3, %xmm1
+; FMA3_512-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; FMA3_512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm8[0],xmm0[0]
+; FMA3_512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; FMA3_512-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm6[0],xmm1[0]
+; FMA3_512-NEXT: vinsertf128 $1, %xmm1, %ymm9, %ymm1
+; FMA3_512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: buildvector_mul_subadd_pd512:
+; FMA4: # %bb.0: # %bb
+; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vaddsd %xmm4, %xmm0, %xmm9
+; FMA4-NEXT: vextractf128 $1, %ymm0, %xmm3
+; FMA4-NEXT: vextractf128 $1, %ymm4, %xmm6
+; FMA4-NEXT: vaddsd %xmm6, %xmm3, %xmm7
+; FMA4-NEXT: vaddsd %xmm5, %xmm1, %xmm8
+; FMA4-NEXT: vextractf128 $1, %ymm1, %xmm1
+; FMA4-NEXT: vextractf128 $1, %ymm5, %xmm5
+; FMA4-NEXT: vaddsd %xmm5, %xmm1, %xmm2
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; FMA4-NEXT: vsubsd %xmm4, %xmm0, %xmm0
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm4 = xmm6[1,0]
+; FMA4-NEXT: vsubsd %xmm4, %xmm3, %xmm3
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; FMA4-NEXT: vpermilpd {{.*#+}} xmm4 = xmm5[1,0]
+; FMA4-NEXT: vsubsd %xmm4, %xmm1, %xmm1
+; FMA4-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm7[0],xmm3[0]
+; FMA4-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm9[0],xmm0[0]
+; FMA4-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; FMA4-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; FMA4-NEXT: vinsertf128 $1, %xmm1, %ymm8, %ymm1
+; FMA4-NEXT: retq
+bb:
+ %A = fmul <8 x double> %C, %D
+ %A0 = extractelement <8 x double> %A, i32 0
+ %B0 = extractelement <8 x double> %B, i32 0
+ %sub0 = fadd double %A0, %B0
+ %A2 = extractelement <8 x double> %A, i32 2
+ %B2 = extractelement <8 x double> %B, i32 2
+ %sub2 = fadd double %A2, %B2
+ %A4 = extractelement <8 x double> %A, i32 4
+ %B4 = extractelement <8 x double> %B, i32 4
+ %sub4 = fadd double %A4, %B4
+ %A6 = extractelement <8 x double> %A, i32 6
+ %B6 = extractelement <8 x double> %B, i32 6
+ %sub6 = fadd double %A6, %B6
+ %A1 = extractelement <8 x double> %A, i32 1
+ %B1 = extractelement <8 x double> %B, i32 1
+ %add1 = fsub double %A1, %B1
+ %A3 = extractelement <8 x double> %A, i32 3
+ %B3 = extractelement <8 x double> %B, i32 3
+ %add3 = fsub double %A3, %B3
+ %A7 = extractelement <8 x double> %A, i32 7
+ %B7 = extractelement <8 x double> %B, i32 7
+ %add7 = fsub double %A7, %B7
+ %vecinsert1 = insertelement <8 x double> undef, double %sub0, i32 0
+ %vecinsert2 = insertelement <8 x double> %vecinsert1, double %add1, i32 1
+ %vecinsert3 = insertelement <8 x double> %vecinsert2, double %sub2, i32 2
+ %vecinsert4 = insertelement <8 x double> %vecinsert3, double %add3, i32 3
+ %vecinsert5 = insertelement <8 x double> %vecinsert4, double %sub4, i32 4
+ ; element 5 is undef
+ %vecinsert7 = insertelement <8 x double> %vecinsert5, double %sub6, i32 6
+ %vecinsert8 = insertelement <8 x double> %vecinsert7, double %add7, i32 7
+ ret <8 x double> %vecinsert8
+}
+
attributes #0 = { nounwind "unsafe-fp-math"="true" }