[llvm] 38721f2 - [X86] ComputeNumSignBitsForTargetNode - attempt to recognise PACKSSDW(PACKSSDW(X,Y),PACKSSDW(Z,W)) patterns

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 5 08:43:59 PDT 2023


Author: Simon Pilgrim
Date: 2023-07-05T16:43:43+01:00
New Revision: 38721f29f8b33feefb13121e6d24dc54f345ae73

URL: https://github.com/llvm/llvm-project/commit/38721f29f8b33feefb13121e6d24dc54f345ae73
DIFF: https://github.com/llvm/llvm-project/commit/38721f29f8b33feefb13121e6d24dc54f345ae73.diff

LOG: [X86] ComputeNumSignBitsForTargetNode - attempt to recognise PACKSSDW(PACKSSDW(X,Y),PACKSSDW(Z,W)) patterns

These are often used when we're packing vXi64 comparison results, but since we don't have a PACKSSQD instruction we have to bitcast instead, and those bitcasts don't interact well with num-sign-bits value tracking.
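For context, a minimal intrinsics sketch of the kind of source that produces this DAG shape (hypothetical example, not taken from the commit; the helper name any_lane_zero is made up): vXi64 compares feeding two levels of PACKSSDW.

#include <smmintrin.h> // SSE4.1 for _mm_cmpeq_epi64

// Returns true if any of the 8 i64 lanes is zero. Each pcmpeqq result
// lane is all-ones or all-zeros; viewed as v4i32, both halves of such
// a lane saturate cleanly, so every packs step keeps each output lane
// all-ones or all-zeros.
bool any_lane_zero(const __m128i v[4]) {
  __m128i z = _mm_setzero_si128();
  __m128i c0 = _mm_cmpeq_epi64(v[0], z); // v2i64 all-signbits mask
  __m128i c1 = _mm_cmpeq_epi64(v[1], z);
  __m128i c2 = _mm_cmpeq_epi64(v[2], z);
  __m128i c3 = _mm_cmpeq_epi64(v[3], z);
  __m128i lo = _mm_packs_epi32(c0, c1);  // PACKSSDW over bitcast masks
  __m128i hi = _mm_packs_epi32(c2, c3);
  __m128i m  = _mm_packs_epi32(lo, hi);  // the outer PACKSSDW
  return _mm_movemask_epi8(m) != 0;
}

Before this patch, ComputeNumSignBits lost the all-sign-bits property at the bitcasts, which is what the test diffs below recover.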

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
    llvm/test/CodeGen/X86/vselect-packss.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c2aaea7aae95b5..7c687fbb23e2bb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39140,12 +39140,30 @@ unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
     getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
                         DemandedRHS);
 
+    // Helper to detect PACKSSDW(BITCAST(PACKSSDW(X)),BITCAST(PACKSSDW(Y)))
+    // patterns often used to compact vXi64 allsignbit patterns.
+    auto NumSignBitsPACKSS = [&](SDValue V, const APInt &Elts) -> unsigned {
+      SDValue BC = peekThroughBitcasts(V);
+      if (BC.getOpcode() == X86ISD::PACKSS &&
+          BC.getScalarValueSizeInBits() == 16 &&
+          V.getScalarValueSizeInBits() == 32) {
+        SDValue BC0 = peekThroughBitcasts(BC.getOperand(0));
+        SDValue BC1 = peekThroughBitcasts(BC.getOperand(1));
+        if (BC0.getScalarValueSizeInBits() == 64 &&
+            BC1.getScalarValueSizeInBits() == 64 &&
+            DAG.ComputeNumSignBits(BC0, Depth + 1) == 64 &&
+            DAG.ComputeNumSignBits(BC1, Depth + 1) == 64)
+          return 32;
+      }
+      return DAG.ComputeNumSignBits(V, Elts, Depth + 1);
+    };
+
     unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
     unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
     if (!!DemandedLHS)
-      Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
+      Tmp0 = NumSignBitsPACKSS(Op.getOperand(0), DemandedLHS);
     if (!!DemandedRHS)
-      Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
+      Tmp1 = NumSignBitsPACKSS(Op.getOperand(1), DemandedRHS);
     unsigned Tmp = std::min(Tmp0, Tmp1);
     if (Tmp > (SrcBits - VTBits))
       return Tmp - (SrcBits - VTBits);
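To see why proving 32 sign bits on each operand is enough, the sign-bit bookkeeping at the tail of this hunk can be restated as a standalone sketch (illustrative only; packss_sign_bits is a made-up name, and the final 1 stands in for the conservative fallback):

#include <algorithm>
#include <cassert>

// A saturating pack from SrcBits-wide lanes to VTBits-wide lanes drops
// the top SrcBits - VTBits bits. When the operand has more sign bits
// than that, the dropped bits were all sign copies, so the result
// keeps Tmp - (SrcBits - VTBits) of them.
unsigned packss_sign_bits(unsigned Tmp0, unsigned Tmp1, unsigned SrcBits,
                          unsigned VTBits) {
  unsigned Tmp = std::min(Tmp0, Tmp1);
  return Tmp > (SrcBits - VTBits) ? Tmp - (SrcBits - VTBits) : 1;
}

int main() {
  // With the new helper proving both 32-bit operands all-sign-bits,
  // PACKSSDW (32 -> 16) now reports 16 sign bits: every output lane
  // is known to be all-zeros or all-ones.
  assert(packss_sign_bits(32, 32, 32, 16) == 16);
}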

diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
index a6da42198611c1..6752c4f426becc 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
@@ -1102,7 +1102,7 @@ define i1 @icmp0_v8i64_v8i1(<8 x i64>) {
 ; SSE41-NEXT:    packssdw %xmm1, %xmm0
 ; SSE41-NEXT:    packssdw %xmm2, %xmm0
 ; SSE41-NEXT:    pmovmskb %xmm0, %eax
-; SSE41-NEXT:    testl $43690, %eax # imm = 0xAAAA
+; SSE41-NEXT:    testl %eax, %eax
 ; SSE41-NEXT:    setne %al
 ; SSE41-NEXT:    retq
 ;
@@ -1899,7 +1899,7 @@ define i1 @icmp_v8i64_v8i1(<8 x i64>, <8 x i64>) {
 ; SSE41-NEXT:    packssdw %xmm1, %xmm0
 ; SSE41-NEXT:    packssdw %xmm2, %xmm0
 ; SSE41-NEXT:    pmovmskb %xmm0, %eax
-; SSE41-NEXT:    testl $43690, %eax # imm = 0xAAAA
+; SSE41-NEXT:    testl %eax, %eax
 ; SSE41-NEXT:    setne %al
 ; SSE41-NEXT:    retq
 ;
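The testl change above is sound because pmovmskb emits two bits per 16-bit lane, and those bits always agree once each lane is proven all-zeros or all-ones, making the 0xAAAA mask of the odd bits redundant. A small exhaustive check of that equivalence (illustrative only, not part of the commit):

#include <cassert>
#include <cstdint>

int main() {
  // Enumerate every pattern of 8 all-ones/all-zeros 16-bit lanes and
  // build the corresponding pmovmskb result: lane i contributes an
  // equal pair of bits at positions 2*i and 2*i+1.
  for (uint32_t lanes = 0; lanes < 256; ++lanes) {
    uint32_t mask = 0;
    for (int i = 0; i < 8; ++i)
      if (lanes & (1u << i))
        mask |= 3u << (2 * i);
    // testl $0xAAAA, %eax and testl %eax, %eax reach the same verdict.
    assert(((mask & 0xAAAAu) != 0) == (mask != 0));
  }
}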

diff --git a/llvm/test/CodeGen/X86/vselect-packss.ll b/llvm/test/CodeGen/X86/vselect-packss.ll
index f31b986ceb2626..7078eed042b43e 100644
--- a/llvm/test/CodeGen/X86/vselect-packss.ll
+++ b/llvm/test/CodeGen/X86/vselect-packss.ll
@@ -237,6 +237,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
 ;
 ; SSE42-LABEL: vselect_packss_v16i64:
 ; SSE42:       # %bb.0:
+; SSE42-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm8
 ; SSE42-NEXT:    pcmpeqq {{[0-9]+}}(%rsp), %xmm7
 ; SSE42-NEXT:    pcmpeqq {{[0-9]+}}(%rsp), %xmm6
 ; SSE42-NEXT:    packssdw %xmm7, %xmm6
@@ -252,22 +253,21 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
 ; SSE42-NEXT:    packssdw %xmm1, %xmm0
 ; SSE42-NEXT:    packssdw %xmm2, %xmm0
 ; SSE42-NEXT:    packsswb %xmm4, %xmm0
-; SSE42-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT:    pand %xmm0, %xmm1
-; SSE42-NEXT:    pandn {{[0-9]+}}(%rsp), %xmm0
-; SSE42-NEXT:    por %xmm1, %xmm0
+; SSE42-NEXT:    pblendvb %xmm0, {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT:    movdqa %xmm8, %xmm0
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: vselect_packss_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm8
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm9
-; AVX1-NEXT:    vpcmpeqq %xmm8, %xmm9, %xmm8
+; AVX1-NEXT:    vmovdqa {{[0-9]+}}(%rsp), %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm10
+; AVX1-NEXT:    vpcmpeqq %xmm9, %xmm10, %xmm9
 ; AVX1-NEXT:    vpcmpeqq %xmm7, %xmm3, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm8, %xmm3, %xmm3
+; AVX1-NEXT:    vpackssdw %xmm9, %xmm3, %xmm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm8
-; AVX1-NEXT:    vpcmpeqq %xmm7, %xmm8, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm9
+; AVX1-NEXT:    vpcmpeqq %xmm7, %xmm9, %xmm7
 ; AVX1-NEXT:    vpcmpeqq %xmm6, %xmm2, %xmm2
 ; AVX1-NEXT:    vpackssdw %xmm7, %xmm2, %xmm2
 ; AVX1-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
@@ -283,9 +283,7 @@ define <16 x i8> @vselect_packss_v16i64(<16 x i64> %a0, <16 x i64> %a1, <16 x i8
 ; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpand {{[0-9]+}}(%rsp), %xmm0, %xmm1
-; AVX1-NEXT:    vpandn {{[0-9]+}}(%rsp), %xmm0, %xmm0
-; AVX1-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpblendvb %xmm0, {{[0-9]+}}(%rsp), %xmm8, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
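The vselect-packss.ll changes follow from the same analysis: once the packed mask is proven all-sign-bits in every lane, the generic and/andnot/or select lowering can become (v)pblendvb, which reads only each byte's sign bit. A hedged intrinsics sketch of the two lowerings (hypothetical helper names, not from the commit):

#include <smmintrin.h> // SSE4.1 for _mm_blendv_epi8

// Generic bit-level select: correct for any mask value.
__m128i select_bitwise(__m128i mask, __m128i a, __m128i b) {
  return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
}

// pblendvb keys off each mask byte's sign bit only, so it matches the
// bit-level select exactly when every mask byte is 0x00 or 0xFF, which
// is the property the improved ComputeNumSignBits now proves.
__m128i select_blendv(__m128i mask, __m128i a, __m128i b) {
  return _mm_blendv_epi8(b, a, mask); // picks 'a' where the MSB is set
}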


        

