[llvm] b1e6ca9 - [X86] fmaddsub/fmsubadd combines - add NOFMA target for reference

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 5 09:47:53 PDT 2022


Author: Simon Pilgrim
Date: 2022-04-05T17:47:46+01:00
New Revision: b1e6ca9d227dfdc0f01e83b62e2af1b05d3fc9a7

URL: https://github.com/llvm/llvm-project/commit/b1e6ca9d227dfdc0f01e83b62e2af1b05d3fc9a7
DIFF: https://github.com/llvm/llvm-project/commit/b1e6ca9d227dfdc0f01e83b62e2af1b05d3fc9a7.diff

LOG: [X86] fmaddsub/fmsubadd combines - add NOFMA target for reference
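
For reference, the pattern these tests exercise looks roughly like the IR sketched below. This is a hand-written illustration based on the function signatures and the IR fragment visible at the end of this diff, not a verbatim copy of the test files (the tests also carry a #0 attribute group supplying the fast-math/contraction attributes the fold needs). A vector fmul feeds both an fsub and an fadd, and a shufflevector recombines them, taking the subtracted lanes at even indices and the added lanes at odd indices to match vaddsubpd semantics. With +fma this folds to a single vfmaddsub instruction; the new NOFMA (+avx only) run lines pin down the unfused vmul + vaddsub baseline for comparison.

  define <2 x double> @mul_addsub_sketch(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
  entry:
    %AB = fmul <2 x double> %A, %B
    %Sub = fsub <2 x double> %AB, %C
    %Add = fadd <2 x double> %AB, %C
    ; even lane from %Sub, odd lane from %Add -> vaddsubpd lane pattern
    %AddSub = shufflevector <2 x double> %Sub, <2 x double> %Add, <2 x i32> <i32 0, i32 3>
    ret <2 x double> %AddSub
  }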

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fmaddsub-combine.ll
    llvm/test/CodeGen/X86/fmsubadd-combine.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fmaddsub-combine.ll b/llvm/test/CodeGen/X86/fmaddsub-combine.ll
index 739fa8297c44f..7e5916025eca0 100644
--- a/llvm/test/CodeGen/X86/fmaddsub-combine.ll
+++ b/llvm/test/CodeGen/X86/fmaddsub-combine.ll
@@ -1,4 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s -check-prefixes=NOFMA
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma | FileCheck %s -check-prefixes=FMA3,FMA3_256
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma,+avx512f | FileCheck %s -check-prefixes=FMA3,FMA3_512
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma4 | FileCheck %s -check-prefixes=FMA4
@@ -6,6 +7,12 @@
 ; This test checks the fusing of MUL + ADDSUB to FMADDSUB.
 
 define <2 x double> @mul_addsub_pd128(<2 x double> %A, <2 x double> %B,  <2 x double> %C) #0 {
+; NOFMA-LABEL: mul_addsub_pd128:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vaddsubpd %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_addsub_pd128:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2
@@ -24,6 +31,12 @@ entry:
 }
 
 define <4 x float> @mul_addsub_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
+; NOFMA-LABEL: mul_addsub_ps128:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vaddsubps %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_addsub_ps128:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2
@@ -42,6 +55,12 @@ entry:
 }
 
 define <4 x double> @mul_addsub_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
+; NOFMA-LABEL: mul_addsub_pd256:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubpd %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_addsub_pd256:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2
@@ -60,6 +79,12 @@ entry:
 }
 
 define <8 x float> @mul_addsub_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
+; NOFMA-LABEL: mul_addsub_ps256:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubps %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_addsub_ps256:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2
@@ -78,6 +103,14 @@ entry:
 }
 
 define <8 x double> @mul_addsub_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
+; NOFMA-LABEL: mul_addsub_pd512:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubpd %ymm4, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubpd %ymm5, %ymm1, %ymm1
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: mul_addsub_pd512:
 ; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm2 * ymm0) +/- ymm4
@@ -103,6 +136,14 @@ entry:
 }
 
 define <16 x float> @mul_addsub_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
+; NOFMA-LABEL: mul_addsub_ps512:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulps %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubps %ymm4, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubps %ymm5, %ymm1, %ymm1
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: mul_addsub_ps512:
 ; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm2 * ymm0) +/- ymm4
@@ -128,6 +169,12 @@ entry:
 }
 
 define <4 x float> @buildvector_mul_addsub_ps128(<4 x float> %C, <4 x float> %D, <4 x float> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_addsub_ps128:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vaddsubps %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_addsub_ps128:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2
@@ -159,6 +206,12 @@ bb:
 }
 
 define <2 x double> @buildvector_mul_addsub_pd128(<2 x double> %C, <2 x double> %D, <2 x double> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_addsub_pd128:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vaddsubpd %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_addsub_pd128:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2
@@ -182,6 +235,12 @@ bb:
 }
 
 define <8 x float> @buildvector_mul_addsub_ps256(<8 x float> %C, <8 x float> %D, <8 x float> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_addsub_ps256:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubps %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_addsub_ps256:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2
@@ -229,6 +288,12 @@ bb:
 }
 
 define <4 x double> @buildvector_mul_addsub_pd256(<4 x double> %C, <4 x double> %D, <4 x double> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_addsub_pd256:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubpd %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_addsub_pd256:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2
@@ -260,6 +325,14 @@ bb:
 }
 
 define <16 x float> @buildvector_mul_addsub_ps512(<16 x float> %C, <16 x float> %D, <16 x float> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_addsub_ps512:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulps %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubps %ymm4, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubps %ymm5, %ymm1, %ymm1
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: buildvector_mul_addsub_ps512:
 ; FMA3_256:       # %bb.0: # %bb
 ; FMA3_256-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm2 * ymm0) +/- ymm4
@@ -346,6 +419,14 @@ bb:
 }
 
 define <8 x double> @buildvector_mul_addsub_pd512(<8 x double> %C, <8 x double> %D, <8 x double> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_addsub_pd512:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubpd %ymm4, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsubpd %ymm5, %ymm1, %ymm1
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: buildvector_mul_addsub_pd512:
 ; FMA3_256:       # %bb.0: # %bb
 ; FMA3_256-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm2 * ymm0) +/- ymm4
@@ -397,6 +478,24 @@ bb:
 }
 
 define <4 x float> @buildvector_mul_subadd_ps128(<4 x float> %C, <4 x float> %D, <4 x float> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_subadd_ps128:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vaddss %xmm2, %xmm0, %xmm1
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; NOFMA-NEXT:    vaddss %xmm4, %xmm3, %xmm3
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; NOFMA-NEXT:    vsubss %xmm5, %xmm4, %xmm4
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2,3]
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; NOFMA-NEXT:    vsubss %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_subadd_ps128:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2
@@ -428,6 +527,16 @@ bb:
 }
 
 define <2 x double> @buildvector_mul_subadd_pd128(<2 x double> %C, <2 x double> %D, <2 x double> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_subadd_pd128:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vaddsd %xmm2, %xmm0, %xmm1
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; NOFMA-NEXT:    vsubsd %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_subadd_pd128:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2
@@ -451,6 +560,40 @@ bb:
 }
 
 define <8 x float> @buildvector_mul_subadd_ps256(<8 x float> %C, <8 x float> %D, <8 x float> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_subadd_ps256:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddss %xmm2, %xmm0, %xmm1
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
+; NOFMA-NEXT:    vaddss %xmm4, %xmm3, %xmm3
+; NOFMA-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; NOFMA-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; NOFMA-NEXT:    vaddss %xmm5, %xmm4, %xmm8
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; NOFMA-NEXT:    vaddss %xmm6, %xmm7, %xmm9
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; NOFMA-NEXT:    vsubss %xmm6, %xmm7, %xmm6
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[2,3]
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; NOFMA-NEXT:    vsubss %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm5[1,1,3,3]
+; NOFMA-NEXT:    vsubss %xmm2, %xmm1, %xmm1
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[2,3]
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm9[0],xmm1[3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm2 = xmm4[3,3,3,3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm3 = xmm5[3,3,3,3]
+; NOFMA-NEXT:    vsubss %xmm3, %xmm2, %xmm2
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; NOFMA-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_subadd_ps256:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2
@@ -498,6 +641,24 @@ bb:
 }
 
 define <4 x double> @buildvector_mul_subadd_pd256(<4 x double> %C, <4 x double> %D, <4 x double> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_subadd_pd256:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsd %xmm2, %xmm0, %xmm1
+; NOFMA-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; NOFMA-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; NOFMA-NEXT:    vaddsd %xmm4, %xmm3, %xmm5
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; NOFMA-NEXT:    vsubsd %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm4[1,0]
+; NOFMA-NEXT:    vsubsd %xmm2, %xmm1, %xmm1
+; NOFMA-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; NOFMA-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: buildvector_mul_subadd_pd256:
 ; FMA3:       # %bb.0: # %bb
 ; FMA3-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2
@@ -529,6 +690,64 @@ bb:
 }
 
 define <16 x float> @buildvector_mul_subadd_ps512(<16 x float> %C, <16 x float> %D, <16 x float> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_subadd_ps512:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulps %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddss %xmm4, %xmm0, %xmm8
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm4[1,0]
+; NOFMA-NEXT:    vaddss %xmm6, %xmm3, %xmm9
+; NOFMA-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; NOFMA-NEXT:    vextractf128 $1, %ymm4, %xmm7
+; NOFMA-NEXT:    vaddss %xmm7, %xmm6, %xmm10
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm6[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm7[1,0]
+; NOFMA-NEXT:    vaddss %xmm2, %xmm3, %xmm2
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm11 = xmm10[0,1],xmm2[0],xmm10[3]
+; NOFMA-NEXT:    vaddss %xmm5, %xmm1, %xmm10
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm5[1,0]
+; NOFMA-NEXT:    vaddss %xmm2, %xmm3, %xmm12
+; NOFMA-NEXT:    vextractf128 $1, %ymm1, %xmm14
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm13 = xmm14[1,0]
+; NOFMA-NEXT:    vextractf128 $1, %ymm5, %xmm15
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm15[1,0]
+; NOFMA-NEXT:    vaddss %xmm3, %xmm13, %xmm13
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; NOFMA-NEXT:    vsubss %xmm2, %xmm3, %xmm2
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm2 = xmm8[0],xmm2[0],xmm8[2,3]
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm3 = xmm4[3,3,3,3]
+; NOFMA-NEXT:    vsubss %xmm3, %xmm0, %xmm0
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm2 = xmm6[3,3,3,3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm3 = xmm7[3,3,3,3]
+; NOFMA-NEXT:    vsubss %xmm3, %xmm2, %xmm2
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm2 = xmm11[0,1,2],xmm2[0]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; NOFMA-NEXT:    vsubss %xmm4, %xmm3, %xmm3
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm3 = xmm10[0],xmm3[0],xmm10[2,3]
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm12[0],xmm3[3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm4 = xmm5[3,3,3,3]
+; NOFMA-NEXT:    vsubss %xmm4, %xmm1, %xmm1
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm14[1,1,3,3]
+; NOFMA-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm15[1,1,3,3]
+; NOFMA-NEXT:    vsubss %xmm4, %xmm3, %xmm3
+; NOFMA-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,0],xmm13[0,0]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm4 = xmm14[3,3,3,3]
+; NOFMA-NEXT:    vpermilps {{.*#+}} xmm5 = xmm15[3,3,3,3]
+; NOFMA-NEXT:    vsubss %xmm5, %xmm4, %xmm4
+; NOFMA-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; NOFMA-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: buildvector_mul_subadd_ps512:
 ; FMA3_256:       # %bb.0: # %bb
 ; FMA3_256-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm2 * ymm0) -/+ ymm4
@@ -615,6 +834,34 @@ bb:
 }
 
 define <8 x double> @buildvector_mul_subadd_pd512(<8 x double> %C, <8 x double> %D, <8 x double> %B) #0 {
+; NOFMA-LABEL: buildvector_mul_subadd_pd512:
+; NOFMA:       # %bb.0: # %bb
+; NOFMA-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vaddsd %xmm4, %xmm0, %xmm2
+; NOFMA-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; NOFMA-NEXT:    vextractf128 $1, %ymm4, %xmm6
+; NOFMA-NEXT:    vaddsd %xmm6, %xmm3, %xmm9
+; NOFMA-NEXT:    vaddsd %xmm5, %xmm1, %xmm8
+; NOFMA-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; NOFMA-NEXT:    vextractf128 $1, %ymm5, %xmm5
+; NOFMA-NEXT:    vaddsd %xmm5, %xmm1, %xmm7
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; NOFMA-NEXT:    vsubsd %xmm4, %xmm0, %xmm0
+; NOFMA-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm6[1,0]
+; NOFMA-NEXT:    vsubsd %xmm3, %xmm2, %xmm2
+; NOFMA-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm9[0],xmm2[0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; NOFMA-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm5[1,0]
+; NOFMA-NEXT:    vsubsd %xmm3, %xmm1, %xmm1
+; NOFMA-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm7[0],xmm1[0]
+; NOFMA-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vinsertf128 $1, %xmm1, %ymm8, %ymm1
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: buildvector_mul_subadd_pd512:
 ; FMA3_256:       # %bb.0: # %bb
 ; FMA3_256-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm2 * ymm0) -/+ ymm4

diff --git a/llvm/test/CodeGen/X86/fmsubadd-combine.ll b/llvm/test/CodeGen/X86/fmsubadd-combine.ll
index 76262561c987a..ddf51b858cdd8 100644
--- a/llvm/test/CodeGen/X86/fmsubadd-combine.ll
+++ b/llvm/test/CodeGen/X86/fmsubadd-combine.ll
@@ -1,11 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_256 %s
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma,+avx512f | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_512 %s
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma4 | FileCheck -check-prefix=FMA4 %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s -check-prefixes=CHECK,NOFMA
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma | FileCheck %s -check-prefixes=CHECK,FMA3,FMA3_256
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma,+avx512f | FileCheck %s -check-prefixes=CHECK,FMA3,FMA3_512
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma4 | FileCheck %s -check-prefixes=CHECK,FMA4
 
 ; This test checks the fusing of MUL + SUB/ADD to FMSUBADD.
 
 define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
+; NOFMA-LABEL: mul_subadd_pd128:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vsubpd %xmm2, %xmm0, %xmm1
+; NOFMA-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_subadd_pd128:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2
@@ -24,6 +33,14 @@ entry:
 }
 
 define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
+; NOFMA-LABEL: mul_subadd_ps128:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; NOFMA-NEXT:    vsubps %xmm2, %xmm0, %xmm1
+; NOFMA-NEXT:    vaddps %xmm2, %xmm0, %xmm0
+; NOFMA-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_subadd_ps128:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2
@@ -42,6 +59,14 @@ entry:
 }
 
 define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
+; NOFMA-LABEL: mul_subadd_pd256:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vsubpd %ymm2, %ymm0, %ymm1
+; NOFMA-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_subadd_pd256:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2
@@ -60,6 +85,14 @@ entry:
 }
 
 define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
+; NOFMA-LABEL: mul_subadd_ps256:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; NOFMA-NEXT:    vsubps %ymm2, %ymm0, %ymm1
+; NOFMA-NEXT:    vaddps %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; NOFMA-NEXT:    retq
+;
 ; FMA3-LABEL: mul_subadd_ps256:
 ; FMA3:       # %bb.0: # %entry
 ; FMA3-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2
@@ -78,6 +111,18 @@ entry:
 }
 
 define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
+; NOFMA-LABEL: mul_subadd_pd512:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vsubpd %ymm5, %ymm1, %ymm2
+; NOFMA-NEXT:    vsubpd %ymm4, %ymm0, %ymm3
+; NOFMA-NEXT:    vaddpd %ymm5, %ymm1, %ymm1
+; NOFMA-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
+; NOFMA-NEXT:    vaddpd %ymm4, %ymm0, %ymm0
+; NOFMA-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: mul_subadd_pd512:
 ; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm2 * ymm0) -/+ ymm4
@@ -103,6 +148,18 @@ entry:
 }
 
 define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
+; NOFMA-LABEL: mul_subadd_ps512:
+; NOFMA:       # %bb.0: # %entry
+; NOFMA-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; NOFMA-NEXT:    vmulps %ymm3, %ymm1, %ymm1
+; NOFMA-NEXT:    vsubps %ymm5, %ymm1, %ymm2
+; NOFMA-NEXT:    vsubps %ymm4, %ymm0, %ymm3
+; NOFMA-NEXT:    vaddps %ymm5, %ymm1, %ymm1
+; NOFMA-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; NOFMA-NEXT:    vaddps %ymm4, %ymm0, %ymm0
+; NOFMA-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
+; NOFMA-NEXT:    retq
+;
 ; FMA3_256-LABEL: mul_subadd_ps512:
 ; FMA3_256:       # %bb.0: # %entry
 ; FMA3_256-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm2 * ymm0) -/+ ymm4
@@ -129,21 +186,13 @@ entry:
 
 ; This should not be matched to fmsubadd because the mul is on the wrong side of the fsub.
 define <2 x double> @mul_subadd_bad_commute(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
-; FMA3-LABEL: mul_subadd_bad_commute:
-; FMA3:       # %bb.0: # %entry
-; FMA3-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
-; FMA3-NEXT:    vsubpd %xmm0, %xmm2, %xmm1
-; FMA3-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
-; FMA3-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
-; FMA3-NEXT:    retq
-;
-; FMA4-LABEL: mul_subadd_bad_commute:
-; FMA4:       # %bb.0: # %entry
-; FMA4-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
-; FMA4-NEXT:    vsubpd %xmm0, %xmm2, %xmm1
-; FMA4-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
-; FMA4-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
-; FMA4-NEXT:    retq
+; CHECK-LABEL: mul_subadd_bad_commute:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vsubpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; CHECK-NEXT:    retq
 entry:
   %AB = fmul <2 x double> %A, %B
   %Sub = fsub <2 x double> %C, %AB

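A note on the last test: per the context lines above, mul_subadd_bad_commute computes %Sub = fsub <2 x double> %C, %AB, so the multiply is the subtrahend and the pattern must not fold to vfmsubadd (which computes (a * b) -/+ c, never c - a*b); all four run lines therefore agree on the unfused sequence, which is why their checks merge into the shared CHECK prefix. A fusable FMSUBADD pattern keeps the multiply on the left-hand side of the fsub and selects the added lanes at even indices, roughly as sketched below (an illustration, not the verbatim test body):

  %AB = fmul <2 x double> %A, %B
  %Sub = fsub <2 x double> %AB, %C   ; multiply on the left of the fsub
  %Add = fadd <2 x double> %AB, %C
  ; lane 0 from %Add (index 2), lane 1 from %Sub (index 1) -> add-even/sub-odd
  %SubAdd = shufflevector <2 x double> %Sub, <2 x double> %Add, <2 x i32> <i32 2, i32 1>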