[llvm] 866311e - [X86] lowerToAddSubOrFMAddSub - lower 512-bit ADDSUB patterns to blend(fsub,fadd)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 20 07:19:00 PST 2022


Author: Simon Pilgrim
Date: 2022-01-20T15:16:05Z
New Revision: 866311e71c8f1f3724754d006eb50299424d5b1b

URL: https://github.com/llvm/llvm-project/commit/866311e71c8f1f3724754d006eb50299424d5b1b
DIFF: https://github.com/llvm/llvm-project/commit/866311e71c8f1f3724754d006eb50299424d5b1b.diff

LOG: [X86] lowerToAddSubOrFMAddSub - lower 512-bit ADDSUB patterns to blend(fsub,fadd)

AVX512 doesn't provide an ADDSUB instruction, but if we've built this pattern from a build vector of scalar fsub/fadd elements, we can still lower it to blend(fsub,fadd)
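
For reference, the ADDSUB idiom subtracts in the even lanes and adds in the odd lanes. A minimal scalar C++ model of the blend(fsub,fadd) lowering (illustrative only, not part of the commit; the names A, B, Sub, Add are ours):

#include <array>
#include <cstdio>

int main() {
  constexpr int E = 8; // e.g. v8f64
  std::array<double, E> A{}, B{}, Sub{}, Add{}, Result{};
  for (int I = 0; I != E; ++I) {
    A[I] = I + 1.0;
    B[I] = 0.5;
    Sub[I] = A[I] - B[I]; // fsub in every lane
    Add[I] = A[I] + B[I]; // fadd in every lane
  }
  // blend(fsub,fadd): even lanes take Sub, odd lanes take Add.
  for (int I = 0; I != E; I += 2) {
    Result[I] = Sub[I];
    Result[I + 1] = Add[I + 1];
  }
  for (double V : Result)
    std::printf("%g ", V); // 0.5 2.5 2.5 4.5 4.5 6.5 6.5 8.5
  std::printf("\n");
}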

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 0d12cf519a8b1..71c80d518f998 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -10079,13 +10079,18 @@ static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
   if (IsSubAdd)
     return SDValue();
 
-  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
-  // the ADDSUB idiom has been successfully recognized. There are no known
-  // X86 targets with 512-bit ADDSUB instructions!
-  // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
-  // recognition.
-  if (VT.is512BitVector())
-    return SDValue();
+  // There are no known X86 targets with 512-bit ADDSUB instructions!
+  // Convert to blend(fsub,fadd).
+  if (VT.is512BitVector()) {
+    SmallVector<int> Mask;
+    for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
+      Mask.push_back(I);
+      Mask.push_back(I + E + 1);
+    }
+    SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
+    SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
+    return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
+  }
 
   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
 }
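
The mask loop pairs lane I of the FSUB result (shuffle index I) with lane I+1 of the FADD result (shuffle index I + E + 1, since the second shuffle input's elements are numbered from E upward). A standalone sketch that reproduces the mask construction (buildAddSubMask is a hypothetical helper for illustration, not an LLVM API):

#include <cstdio>
#include <vector>

// Rebuild the ADDSUB blend mask for an E-element vector: even lanes
// come from the first shuffle input (Sub), odd lanes from the second
// input (Add), whose elements are numbered E..2*E-1.
std::vector<int> buildAddSubMask(int E) {
  std::vector<int> Mask;
  for (int I = 0; I != E; I += 2) {
    Mask.push_back(I);         // lane I     <- Sub[I]
    Mask.push_back(I + E + 1); // lane I + 1 <- Add[I + 1]
  }
  return Mask;
}

int main() {
  for (int V : buildAddSubMask(8)) // v8f64: 0 9 2 11 4 13 6 15
    std::printf("%d ", V);
  std::printf("\n");
}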

diff --git a/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll b/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll
index 1087ef9193d79..3ae8a1fd6659a 100644
--- a/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll
+++ b/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -531,67 +531,11 @@ define <16 x float> @test17(<16 x float> %A, <16 x float> %B) {
 ;
 ; AVX512-LABEL: test17:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vsubss %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
-; AVX512-NEXT:    vsubss %xmm4, %xmm3, %xmm3
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm5, %xmm4, %xmm4
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm4 = xmm1[3,3,3,3]
-; AVX512-NEXT:    vaddss %xmm4, %xmm3, %xmm3
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm8 = xmm2[0,1,2],xmm3[0]
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX512-NEXT:    vsubss %xmm4, %xmm3, %xmm5
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
-; AVX512-NEXT:    vsubss %xmm7, %xmm6, %xmm6
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm3[1,1,3,3]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm2, %xmm7, %xmm2
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[2,3]
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[3,3,3,3]
-; AVX512-NEXT:    vaddss %xmm4, %xmm3, %xmm3
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm9 = xmm2[0,1,2],xmm3[0]
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
-; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm4
-; AVX512-NEXT:    vsubss %xmm4, %xmm2, %xmm5
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
-; AVX512-NEXT:    vsubss %xmm7, %xmm6, %xmm6
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm2[1,1,3,3]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm3, %xmm7, %xmm3
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[2,3]
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm6[0],xmm3[3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm4 = xmm4[3,3,3,3]
-; AVX512-NEXT:    vaddss %xmm4, %xmm2, %xmm2
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0]
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
-; AVX512-NEXT:    vsubss %xmm1, %xmm0, %xmm3
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
-; AVX512-NEXT:    vsubss %xmm5, %xmm4, %xmm4
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm6, %xmm5, %xmm5
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
-; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX512-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm1
-; AVX512-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT:    vsubps %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    movw $-21846, %ax # imm = 0xAAAA
+; AVX512-NEXT:    kmovw %eax, %k1
+; AVX512-NEXT:    vaddps %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512-NEXT:    vmovaps %zmm2, %zmm0
 ; AVX512-NEXT:    retq
   %1 = extractelement <16 x float> %A, i32 0
   %2 = extractelement <16 x float> %B, i32 0
@@ -677,35 +621,9 @@ define <8 x double> @test18(<8 x double> %A, <8 x double> %B) {
 ;
 ; AVX512-LABEL: test18:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX512-NEXT:    vsubsd %xmm4, %xmm3, %xmm5
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm1[1,0]
-; AVX512-NEXT:    vaddsd %xmm7, %xmm6, %xmm6
-; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT:    vaddsd %xmm4, %xmm3, %xmm3
-; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm5[0],xmm3[0]
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm4
-; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm5
-; AVX512-NEXT:    vsubsd %xmm5, %xmm4, %xmm6
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
-; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm7
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT:    vaddsd %xmm5, %xmm4, %xmm4
-; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm6[0],xmm4[0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm7[0],xmm0[0]
-; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
-; AVX512-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm1
-; AVX512-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT:    vaddpd %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vsubpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vshufpd {{.*#+}} zmm0 = zmm0[0],zmm2[1],zmm0[2],zmm2[3],zmm0[4],zmm2[5],zmm0[6],zmm2[7]
 ; AVX512-NEXT:    retq
   %1 = extractelement <8 x double> %A, i32 0
   %2 = extractelement <8 x double> %B, i32 0
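
Note how the two element types get different blends: for <16 x float> (test17) the shuffle becomes a merge-masked vaddps into the vsubps result under %k1, where the immediate 0xAAAA sets exactly the odd lanes, while for <8 x double> (test18) it becomes a vshufpd interleave. A small check of the f32 mask value (illustrative C++, not part of the commit):

#include <cstdio>

int main() {
  // Bit I set <=> lane I takes the FADD result: odd lanes only.
  unsigned Mask = 0;
  for (int I = 1; I < 16; I += 2)
    Mask |= 1u << I;
  std::printf("0x%X\n", Mask); // prints 0xAAAA (-21846 as a signed i16)
}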




More information about the llvm-commits mailing list