[llvm] 48f45f6 - [X86] Limit mul(x,x) knownbits tests with not undef/poison check

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 31 03:55:26 PST 2022


Author: Simon Pilgrim
Date: 2022-01-31T11:55:10Z
New Revision: 48f45f6b25f086e5c949648160b0784370435f9f

URL: https://github.com/llvm/llvm-project/commit/48f45f6b25f086e5c949648160b0784370435f9f
DIFF: https://github.com/llvm/llvm-project/commit/48f45f6b25f086e5c949648160b0784370435f9f.diff

LOG: [X86] Limit mul(x,x) knownbits tests with not undef/poison check

We can only assume bit[1] == zero if it's the only demanded bit or the source is not undef/poison

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/test/CodeGen/X86/combine-mul.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 45f3005e8f570..240dcca654aee 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3082,6 +3082,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
+    SelfMultiply &= isGuaranteedNotToBeUndefOrPoison(
+        Op.getOperand(0), DemandedElts, false, Depth + 1);
     Known = KnownBits::mul(Known, Known2, SelfMultiply);
     break;
   }

diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index 57d85e1c1c9ed..f0254e784cfc6 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -366,12 +366,16 @@ define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
 define i64 @combine_mul_self_knownbits(i64 %x) {
 ; SSE-LABEL: combine_mul_self_knownbits:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    imull %eax, %eax
+; SSE-NEXT:    andl $2, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_mul_self_knownbits:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    imull %eax, %eax
+; AVX-NEXT:    andl $2, %eax
 ; AVX-NEXT:    retq
   %1 = mul i64 %x, %x
   %2 = and i64 %1, 2
@@ -381,12 +385,15 @@ define i64 @combine_mul_self_knownbits(i64 %x) {
 define <4 x i32> @combine_mul_self_knownbits_vector(<4 x i32> %x) {
 ; SSE-LABEL: combine_mul_self_knownbits_vector:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    pmulld %xmm0, %xmm0
+; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_mul_self_knownbits_vector:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, %x
   %2 = and <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
@@ -400,12 +407,14 @@ define i64 @combine_mul_self_demandedbits(i64 %x) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movq %rdi, %rax
 ; SSE-NEXT:    imulq %rdi, %rax
+; SSE-NEXT:    andq $-3, %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_mul_self_demandedbits:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    movq %rdi, %rax
 ; AVX-NEXT:    imulq %rdi, %rax
+; AVX-NEXT:    andq $-3, %rax
 ; AVX-NEXT:    retq
   %1 = mul i64 %x, %x
   %2 = and i64 %1, -3


        


More information about the llvm-commits mailing list