[llvm] a0c2c6a - [X86][AVX] Fold CONCAT(HOP(X,Y),HOP(Z,W)) -> HOP(CONCAT(X,Z),CONCAT(Y,W)) for float types

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 12 03:44:24 PDT 2020


Author: Simon Pilgrim
Date: 2020-08-12T11:31:05+01:00
New Revision: a0c2c6aa420e572520337b782410c0c04ba87b69

URL: https://github.com/llvm/llvm-project/commit/a0c2c6aa420e572520337b782410c0c04ba87b69
DIFF: https://github.com/llvm/llvm-project/commit/a0c2c6aa420e572520337b782410c0c04ba87b69.diff

LOG: [X86][AVX] Fold CONCAT(HOP(X,Y),HOP(Z,W)) -> HOP(CONCAT(X,Z),CONCAT(Y,W)) for float types

Only do this for AVX2+ targets, as we still see some regressions on AVX1 without PERMPD/PERMQ.

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/haddsub-2.ll
    llvm/test/CodeGen/X86/haddsub-undef.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9ca4d3386960..3c1e8cbbad1e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -48257,6 +48257,8 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
       break;
     case X86ISD::HADD:
     case X86ISD::HSUB:
+    case X86ISD::FHADD:
+    case X86ISD::FHSUB:
     case X86ISD::PACKSS:
     case X86ISD::PACKUS:
       if (!IsSplat && VT.is256BitVector() && Subtarget.hasInt256()) {

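The new FHADD/FHSUB cases reuse the existing HADD/HSUB/PACKSS/PACKUS path in combineConcatVectorOps, which rebuilds the horizontal op at 256-bit width from concatenated 128-bit operands. The rewrite is legal because the 256-bit horizontal ops process each 128-bit lane independently. For reference, here is a minimal standalone C++ sketch (not LLVM code; the scalar lane model and the names hadd128/hadd256/concat are illustrative assumptions) that checks the identity CONCAT(HADD(X,Y),HADD(Z,W)) == HADD(CONCAT(X,Z),CONCAT(Y,W)) on plain doubles:

// Standalone illustration only: models VHADDPD's per-128-bit-lane behaviour
// with plain doubles to show why the concat fold preserves results.
#include <array>
#include <cassert>
#include <cstdio>

using V2 = std::array<double, 2>; // one 128-bit lane of f64
using V4 = std::array<double, 4>; // a 256-bit vector of f64

// 128-bit VHADDPD: result = { a0+a1, b0+b1 }.
static V2 hadd128(V2 A, V2 B) { return {A[0] + A[1], B[0] + B[1]}; }

// 256-bit VHADDPD operates on each 128-bit lane independently.
static V4 hadd256(V4 A, V4 B) {
  return {A[0] + A[1], B[0] + B[1], A[2] + A[3], B[2] + B[3]};
}

static V4 concat(V2 Lo, V2 Hi) { return {Lo[0], Lo[1], Hi[0], Hi[1]}; }

int main() {
  V2 X{1.0, 2.0}, Y{3.0, 4.0}, Z{5.0, 6.0}, W{7.0, 8.0};
  V4 Narrow = concat(hadd128(X, Y), hadd128(Z, W)); // two 128-bit HOPs + concat
  V4 Wide = hadd256(concat(X, Z), concat(Y, W));    // one 256-bit HOP
  assert(Narrow == Wide);
  std::printf("fold holds: %g %g %g %g\n", Wide[0], Wide[1], Wide[2], Wide[3]);
  return 0;
}

The test changes below show the practical effect on AVX2+ targets: the per-128-bit vhaddpd/vhsubpd pairs collapse into a single 256-bit horizontal op fed by lane-concatenation shuffles (vperm2f128/vinsertf128/vblendpd).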
diff --git a/llvm/test/CodeGen/X86/haddsub-2.ll b/llvm/test/CodeGen/X86/haddsub-2.ll
index f28ed59621e7..ff5ce0919b3f 100644
--- a/llvm/test/CodeGen/X86/haddsub-2.ll
+++ b/llvm/test/CodeGen/X86/haddsub-2.ll
@@ -444,14 +444,21 @@ define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
 ; SSE-NEXT:    movapd %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: avx_vhadd_pd_test:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vhaddpd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: avx_vhadd_pd_test:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vhaddpd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: avx_vhadd_pd_test:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <4 x double> %A, i32 0
   %vecext1 = extractelement <4 x double> %A, i32 1
   %add = fadd double %vecext, %vecext1
@@ -479,14 +486,21 @@ define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
 ; SSE-NEXT:    movapd %xmm2, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: avx_vhsub_pd_test:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vhsubpd %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vhsubpd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: avx_vhsub_pd_test:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vhsubpd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vhsubpd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: avx_vhsub_pd_test:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vhsubpd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %vecext = extractelement <4 x double> %A, i32 0
   %vecext1 = extractelement <4 x double> %A, i32 1
   %sub = fsub double %vecext, %vecext1

diff --git a/llvm/test/CodeGen/X86/haddsub-undef.ll b/llvm/test/CodeGen/X86/haddsub-undef.ll
index f8379e629c02..7d797b7851f8 100644
--- a/llvm/test/CodeGen/X86/haddsub-undef.ll
+++ b/llvm/test/CodeGen/X86/haddsub-undef.ll
@@ -1146,14 +1146,22 @@ define <4 x double> @PR34724_add_v4f64_u123(<4 x double> %0, <4 x double> %1) {
 ; AVX-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-SLOW-NEXT:    retq
 ;
-; AVX-FAST-LABEL: PR34724_add_v4f64_u123:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-FAST-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
-; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-FAST-NEXT:    retq
+; AVX1-FAST-LABEL: PR34724_add_v4f64_u123:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FAST-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FAST-NEXT:    retq
+;
+; AVX512-FAST-LABEL: PR34724_add_v4f64_u123:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT:    vblendpd {{.*#+}} ymm2 = ymm0[0,1],ymm1[2,3]
+; AVX512-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-FAST-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
+; AVX512-FAST-NEXT:    retq
   %3 = shufflevector <4 x double> %0, <4 x double> %1, <2 x i32> <i32 2, i32 4>
   %4 = shufflevector <4 x double> %0, <4 x double> %1, <2 x i32> <i32 3, i32 5>
   %5 = fadd <2 x double> %3, %4
@@ -1196,13 +1204,19 @@ define <4 x double> @PR34724_add_v4f64_0u23(<4 x double> %0, <4 x double> %1) {
 ; AVX-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-SLOW-NEXT:    retq
 ;
-; AVX-FAST-LABEL: PR34724_add_v4f64_0u23:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-FAST-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
-; AVX-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-FAST-NEXT:    retq
+; AVX1-FAST-LABEL: PR34724_add_v4f64_0u23:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FAST-NEXT:    vhaddpd %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FAST-NEXT:    retq
+;
+; AVX512-FAST-LABEL: PR34724_add_v4f64_0u23:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-FAST-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
+; AVX512-FAST-NEXT:    retq
   %3 = shufflevector <4 x double> %0, <4 x double> %1, <2 x i32> <i32 0, i32 4>
   %4 = shufflevector <4 x double> %0, <4 x double> %1, <2 x i32> <i32 1, i32 5>
   %5 = fadd <2 x double> %3, %4
@@ -1245,14 +1259,21 @@ define <4 x double> @PR34724_add_v4f64_01u3(<4 x double> %0, <4 x double> %1) {
 ; AVX-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-SLOW-NEXT:    retq
 ;
-; AVX-FAST-LABEL: PR34724_add_v4f64_01u3:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-FAST-NEXT:    vhaddpd %xmm2, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-FAST-NEXT:    vhaddpd %xmm1, %xmm1, %xmm1
-; AVX-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-FAST-NEXT:    retq
+; AVX1-FAST-LABEL: PR34724_add_v4f64_01u3:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-FAST-NEXT:    vhaddpd %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-FAST-NEXT:    vhaddpd %xmm1, %xmm1, %xmm1
+; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FAST-NEXT:    retq
+;
+; AVX512-FAST-LABEL: PR34724_add_v4f64_01u3:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX512-FAST-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX512-FAST-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
+; AVX512-FAST-NEXT:    retq
   %3 = shufflevector <4 x double> %0, <4 x double> undef, <2 x i32> <i32 0, i32 2>
   %4 = shufflevector <4 x double> %0, <4 x double> undef, <2 x i32> <i32 1, i32 3>
   %5 = fadd <2 x double> %3, %4
@@ -1292,13 +1313,21 @@ define <4 x double> @PR34724_add_v4f64_012u(<4 x double> %0, <4 x double> %1) {
 ; AVX-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-SLOW-NEXT:    retq
 ;
-; AVX-FAST-LABEL: PR34724_add_v4f64_012u:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-FAST-NEXT:    vhaddpd %xmm2, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vhaddpd %xmm1, %xmm1, %xmm1
-; AVX-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-FAST-NEXT:    retq
+; AVX1-FAST-LABEL: PR34724_add_v4f64_012u:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-FAST-NEXT:    vhaddpd %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vhaddpd %xmm1, %xmm1, %xmm1
+; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FAST-NEXT:    retq
+;
+; AVX512-FAST-LABEL: PR34724_add_v4f64_012u:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX512-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-FAST-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
+; AVX512-FAST-NEXT:    retq
   %3 = shufflevector <4 x double> %0, <4 x double> undef, <2 x i32> <i32 0, i32 2>
   %4 = shufflevector <4 x double> %0, <4 x double> undef, <2 x i32> <i32 1, i32 3>
   %5 = fadd <2 x double> %3, %4

