[llvm] 6523e2f - [X86] SimplifyDemandedBitsForTargetNode - improve TESTPS/TESTPD handling for duplicated operands

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 14 04:04:13 PDT 2023


Author: Simon Pilgrim
Date: 2023-04-14T12:03:38+01:00
New Revision: 6523e2f4f930a3e1b9c02d7c45b1859dba098f16

URL: https://github.com/llvm/llvm-project/commit/6523e2f4f930a3e1b9c02d7c45b1859dba098f16
DIFF: https://github.com/llvm/llvm-project/commit/6523e2f4f930a3e1b9c02d7c45b1859dba098f16.diff

LOG: [X86] SimplifyDemandedBitsForTargetNode - improve TESTPS/TESTPD handling for duplicated operands

We often repeat the vector operand in TESTPS(X,X)/TESTPD(X,X) for anyof comparisons - ensure we still only demand the sign bits when the TESTP is the only user of that operand

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/combine-testpd.ll
    llvm/test/CodeGen/X86/combine-testps.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9036c8b43b376..059de932f7656 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -43784,8 +43784,11 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
     // TESTPS/TESTPD only demands the sign bits of ALL the elements.
     KnownBits KnownSrc;
     APInt SignMask = APInt::getSignMask(OpVT.getScalarSizeInBits());
-    return SimplifyDemandedBits(Op0, SignMask, KnownSrc, TLO, Depth + 1) ||
-           SimplifyDemandedBits(Op1, SignMask, KnownSrc, TLO, Depth + 1);
+    bool AssumeSingleUse = (Op0 == Op1) && Op->isOnlyUserOf(Op0.getNode());
+    return SimplifyDemandedBits(Op0, SignMask, KnownSrc, TLO, Depth + 1,
+                                AssumeSingleUse) ||
+           SimplifyDemandedBits(Op1, SignMask, KnownSrc, TLO, Depth + 1,
+                                AssumeSingleUse);
   }
   case X86ISD::BEXTR:
   case X86ISD::BEXTRI: {

diff --git a/llvm/test/CodeGen/X86/combine-testpd.ll b/llvm/test/CodeGen/X86/combine-testpd.ll
index 0e0df828e9994..842af121ee545 100644
--- a/llvm/test/CodeGen/X86/combine-testpd.ll
+++ b/llvm/test/CodeGen/X86/combine-testpd.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=CHECK,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s
 
 ;
 ; testz(~X,Y) -> testc(X,Y)
@@ -189,26 +189,13 @@ define i32 @testpdz_256_signbit(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b
 }
 
 define i32 @testpdnzc_256_signbit_multiuse(<4 x double> %c, i32 %a, i32 %b) {
-; AVX1-LABEL: testpdnzc_256_signbit_multiuse:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    movl %edi, %eax
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vtestpd %ymm0, %ymm0
-; AVX1-NEXT:    cmovnel %esi, %eax
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: testpdnzc_256_signbit_multiuse:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    movl %edi, %eax
-; AVX2-NEXT:    vtestpd %ymm0, %ymm0
-; AVX2-NEXT:    cmovnel %esi, %eax
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; CHECK-LABEL: testpdnzc_256_signbit_multiuse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    vtestpd %ymm0, %ymm0
+; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
   %t0 = bitcast <4 x double> %c to <4 x i64>
   %t1 = icmp sgt <4 x i64> zeroinitializer, %t0
   %t2 = sext <4 x i1> %t1 to <4 x i64>

diff --git a/llvm/test/CodeGen/X86/combine-testps.ll b/llvm/test/CodeGen/X86/combine-testps.ll
index 2817ffbe0bdc7..b35ec391b29e1 100644
--- a/llvm/test/CodeGen/X86/combine-testps.ll
+++ b/llvm/test/CodeGen/X86/combine-testps.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=CHECK,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s
 
 ;
 ; testz(~X,Y) -> testc(X,Y)
@@ -189,25 +189,13 @@ define i32 @testpsnzc_256_signbit(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b
 }
 
 define i32 @testpsc_256_signbit_multiuse(<8 x float> %c, i32 %a, i32 %b) {
-; AVX1-LABEL: testpsc_256_signbit_multiuse:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    movl %edi, %eax
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    vtestps %ymm0, %ymm0
-; AVX1-NEXT:    cmovnel %esi, %eax
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: testpsc_256_signbit_multiuse:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    movl %edi, %eax
-; AVX2-NEXT:    vtestps %ymm0, %ymm0
-; AVX2-NEXT:    cmovnel %esi, %eax
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; CHECK-LABEL: testpsc_256_signbit_multiuse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    vtestps %ymm0, %ymm0
+; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
   %t0 = bitcast <8 x float> %c to <8 x i32>
   %t1 = ashr <8 x i32> %t0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
   %t2 = bitcast <8 x i32> %t1 to <8 x float>


        


More information about the llvm-commits mailing list