[llvm] e3deb7d - [X86] computeKnownBitsForTargetNode - add X86ISD::AND KnownBits handling

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 16 04:08:30 PDT 2022


Author: Simon Pilgrim
Date: 2022-03-16T11:05:36Z
New Revision: e3deb7d88b0cd6d15a7b5a345d94c37ff6262bb4

URL: https://github.com/llvm/llvm-project/commit/e3deb7d88b0cd6d15a7b5a345d94c37ff6262bb4
DIFF: https://github.com/llvm/llvm-project/commit/e3deb7d88b0cd6d15a7b5a345d94c37ff6262bb4.diff

LOG: [X86] computeKnownBitsForTargetNode - add X86ISD::AND KnownBits handling

Fixes #54171
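
The new case applies the same rule to X86's flag-producing AND node that
generic ISD::AND already gets: a result bit is known one only if it is known
one in both operands, and known zero if it is known zero in either operand.
`Known &= Known2` (KnownBits::operator&=) encodes exactly that, and the
`Op.getResNo() == 0` guard restricts the computation to the integer result,
since X86ISD::AND also produces EFLAGS. A minimal standalone sketch of the
rule, using plain uint64_t masks rather than LLVM's KnownBits/APInt (the
`Known`/`knownAnd` names below are illustrative only, not LLVM API):

  // Simplified stand-in for llvm::KnownBits, just to illustrate the AND rule.
  #include <cassert>
  #include <cstdint>

  struct Known {
    uint64_t Zero; // bits known to be 0
    uint64_t One;  // bits known to be 1
  };

  // For X = A & B: a bit is known 1 only if it is known 1 in both operands,
  // and known 0 if it is known 0 in either operand.
  Known knownAnd(Known A, Known B) {
    return {A.Zero | B.Zero, A.One & B.One};
  }

  int main() {
    Known I{0, 0};                // nothing known about the input
    Known Seven{~0x7ull, 0x7ull}; // the constant 7: upper bits 0, low 3 bits 1
    Known Masked = knownAnd(I, Seven);
    assert(Masked.Zero == ~0x7ull); // `i & 7`: all bits above bit 2 proven zero
    assert(Masked.One == 0);        // the low 3 bits remain unknown
    return 0;
  }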

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-unsigned-cmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a6e4b79b98cf9..80014359e82e8 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -36269,6 +36269,15 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     }
     break;
   }
+  case X86ISD::AND: {
+    if (Op.getResNo() == 0) {
+      KnownBits Known2;
+      Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+      Known &= Known2;
+    }
+    break;
+  }
   case X86ISD::ANDNP: {
     KnownBits Known2;
     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

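The vector-unsigned-cmp.ll update below is a direct consequence: after
`andq $7, %rdx`, every splatted lane is provably in the range [0, 7], so the
unsigned compares against small constants no longer need the SSE2 pxor
sign-flip or the SSE4.1 pmaxud+pcmpeqd idiom and can be emitted as plain
signed pcmpgtd (presumably against adjusted constants; the exact values are
hidden behind the constant-pool regexes in the checks), which lets the
separate SSE2 and SSE41 check blocks merge into a common SSE block. A quick
standalone check of the underlying equivalence, taking the constants from the
old SSE41 checks (1 through 8) as an assumption about the compares involved:

  #include <cassert>
  #include <cstdint>

  int main() {
    // With x limited to [0, 7] by the mask, an unsigned x >= c for c in
    // [1, 8] agrees with a signed x > c - 1, so the unsigned-compare
    // expansions can collapse to a single signed greater-than.
    for (int32_t x = 0; x <= 7; ++x)
      for (int32_t c = 1; c <= 8; ++c)
        assert((uint32_t(x) >= uint32_t(c)) == (x > c - 1));
    return 0;
  }
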
diff --git a/llvm/test/CodeGen/X86/vector-unsigned-cmp.ll b/llvm/test/CodeGen/X86/vector-unsigned-cmp.ll
index 4013ef9604cd3..3176b5ff6e3d2 100644
--- a/llvm/test/CodeGen/X86/vector-unsigned-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-unsigned-cmp.ll
@@ -535,55 +535,28 @@ define <8 x i16> @PR47448_ugt(i16 signext %0) {
   ret <8 x i16> %6
 }
 
-; FIXME: Recognise the knownbits from X86ISD::AND in previous block.
+; Recognise the knownbits from X86ISD::AND in previous block.
 define void @PR54171(<4 x i64>* %mask0, <4 x i64>* %mask1, i64 %i) {
-; SSE2-LABEL: PR54171:
-; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    andq $7, %rdx
-; SSE2-NEXT:    je .LBB18_2
-; SSE2-NEXT:  # %bb.1: # %if.then
-; SSE2-NEXT:    movd %edx, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT:    movdqa %xmm2, (%rdi)
-; SSE2-NEXT:    movdqa %xmm1, 16(%rdi)
-; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    movdqa %xmm0, (%rsi)
-; SSE2-NEXT:    movdqa %xmm1, 16(%rsi)
-; SSE2-NEXT:  .LBB18_2: # %if.end
-; SSE2-NEXT:    retq
-;
-; SSE41-LABEL: PR54171:
-; SSE41:       # %bb.0: # %entry
-; SSE41-NEXT:    andq $7, %rdx
-; SSE41-NEXT:    je .LBB18_2
-; SSE41-NEXT:  # %bb.1: # %if.then
-; SSE41-NEXT:    movd %edx, %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [3,3,4,4]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm1
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,2,2]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm2
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, (%rdi)
-; SSE41-NEXT:    movdqa %xmm1, 16(%rdi)
-; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [7,7,8,8]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm1
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [5,5,6,6]
-; SSE41-NEXT:    pmaxud %xmm0, %xmm2
-; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, (%rsi)
-; SSE41-NEXT:    movdqa %xmm1, 16(%rsi)
-; SSE41-NEXT:  .LBB18_2: # %if.end
-; SSE41-NEXT:    retq
+; SSE-LABEL: PR54171:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    andq $7, %rdx
+; SSE-NEXT:    je .LBB18_2
+; SSE-NEXT:  # %bb.1: # %if.then
+; SSE-NEXT:    movd %edx, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    movdqa %xmm2, (%rdi)
+; SSE-NEXT:    movdqa %xmm1, 16(%rdi)
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    movdqa %xmm0, (%rsi)
+; SSE-NEXT:    movdqa %xmm1, 16(%rsi)
+; SSE-NEXT:  .LBB18_2: # %if.end
+; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: PR54171:
 ; AVX1:       # %bb.0: # %entry
@@ -592,16 +565,12 @@ define void @PR54171(<4 x i64>* %mask0, <4 x i64>* %mask1, i64 %i) {
 ; AVX1-NEXT:  # %bb.1: # %if.then
 ; AVX1-NEXT:    vmovd %edx, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-NEXT:    vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
 ; AVX1-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX1-NEXT:    vmovdqa %xmm1, 16(%rdi)
-; AVX1-NEXT:    vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vmovdqa %xmm0, (%rsi)
 ; AVX1-NEXT:    vmovdqa %xmm1, 16(%rsi)
 ; AVX1-NEXT:  .LBB18_2: # %if.end
@@ -614,11 +583,9 @@ define void @PR54171(<4 x i64>* %mask0, <4 x i64>* %mask1, i64 %i) {
 ; AVX2-NEXT:  # %bb.1: # %if.then
 ; AVX2-NEXT:    vmovd %edx, %xmm0
 ; AVX2-NEXT:    vpbroadcastd %xmm0, %ymm0
-; AVX2-NEXT:    vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm1, (%rdi)
-; AVX2-NEXT:    vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa %ymm0, (%rsi)
 ; AVX2-NEXT:  .LBB18_2: # %if.end
 ; AVX2-NEXT:    vzeroupper


        

