[llvm] c2fbc70 - [SelectionDAG] Let ComputeKnownSignBits handle (shl (ext X), C) (#97695)

Bjorn Pettersson via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 5 13:39:33 PDT 2024


Author: Bjorn Pettersson
Date: 2024-07-05T22:37:26+02:00
New Revision: c2fbc701aaf826e2015a5dcab36e3ba792e7da7f

URL: https://github.com/llvm/llvm-project/commit/c2fbc701aaf826e2015a5dcab36e3ba792e7da7f
DIFF: https://github.com/llvm/llvm-project/commit/c2fbc701aaf826e2015a5dcab36e3ba792e7da7f.diff

LOG: [SelectionDAG] Let ComputeKnownSignBits handle (shl (ext X), C) (#97695)

Add simple support for looking through ZEXT/ANYEXT/SEXT when doing
ComputeNumSignBits for SHL. This is valid when all extended bits are
shifted out, because the number of sign bits can then be found by
analysing the EXT operand (see the worked example below).

A future improvement could be to pass the "shifted left by" information
along in the recursive calls to ComputeNumSignBits, allowing us to
handle this more generically.

Added: 
    llvm/test/CodeGen/X86/known-signbits-shl.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

Removed: 
    llvm/test/CodeGen/X86/computenumsignbits-shl.ll


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 27c297af39e8c..943d2ddc64246 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4615,12 +4615,33 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
       Tmp = std::min<uint64_t>(Tmp + *ShAmt, VTBits);
     return Tmp;
   case ISD::SHL:
-    if (std::optional<uint64_t> ShAmt =
-            getValidMaximumShiftAmount(Op, DemandedElts, Depth + 1)) {
+    if (std::optional<ConstantRange> ShAmtRange =
+            getValidShiftAmountRange(Op, DemandedElts, Depth + 1)) {
+      uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
+      uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
+      // Try to look through ZERO/SIGN/ANY_EXTEND. If all extended bits are
+      // shifted out, then we can compute the number of sign bits for the
+      // operand being extended. A future improvement could be to pass along
+      // the "shifted left by" information in the recursive calls to
+      // ComputeNumSignBits, allowing us to handle this more generically.
+      if (ISD::isExtOpcode(Op.getOperand(0).getOpcode())) {
+        SDValue Ext = Op.getOperand(0);
+        EVT ExtVT = Ext.getValueType();
+        SDValue Extendee = Ext.getOperand(0);
+        EVT ExtendeeVT = Extendee.getValueType();
+        uint64_t SizeDifference =
+            ExtVT.getScalarSizeInBits() - ExtendeeVT.getScalarSizeInBits();
+        if (SizeDifference <= MinShAmt) {
+          Tmp = SizeDifference +
+                ComputeNumSignBits(Extendee, DemandedElts, Depth + 1);
+          if (MaxShAmt < Tmp)
+            return Tmp - MaxShAmt;
+        }
+      }
       // shl destroys sign bits, ensure it doesn't shift out all sign bits.
       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
-      if (*ShAmt < Tmp)
-        return Tmp - *ShAmt;
+      if (MaxShAmt < Tmp)
+        return Tmp - MaxShAmt;
     }
     break;
   case ISD::AND:

diff --git a/llvm/test/CodeGen/X86/computenumsignbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll
similarity index 79%
rename from llvm/test/CodeGen/X86/computenumsignbits-shl.ll
rename to llvm/test/CodeGen/X86/known-signbits-shl.ll
index 5799bb653ebc6..473fecc307ed4 100644
--- a/llvm/test/CodeGen/X86/computenumsignbits-shl.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-shl.ll
@@ -12,30 +12,12 @@ define void @computeNumSignBits_shl_zext_1(i8 %x, ptr %p) nounwind {
 ; X64-NEXT:    sarb $5, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    shll $10, %ecx
-; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    testw %cx, %cx
-; X64-NEXT:    sets %dl
-; X64-NEXT:    addl $32767, %edx # imm = 0x7FFF
-; X64-NEXT:    movl %eax, %edi
-; X64-NEXT:    shll $11, %edi
-; X64-NEXT:    movswl %di, %r8d
-; X64-NEXT:    shrl %r8d
-; X64-NEXT:    cmpw %r8w, %cx
-; X64-NEXT:    cmovnel %edx, %edi
-; X64-NEXT:    movw %di, (%rsi)
-; X64-NEXT:    movl %eax, %edi
-; X64-NEXT:    shll $12, %edi
-; X64-NEXT:    movswl %di, %r8d
-; X64-NEXT:    shrl $2, %r8d
-; X64-NEXT:    cmpw %r8w, %cx
-; X64-NEXT:    cmovnel %edx, %edi
-; X64-NEXT:    movw %di, (%rsi)
+; X64-NEXT:    shll $11, %ecx
+; X64-NEXT:    movw %cx, (%rsi)
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    shll $12, %ecx
+; X64-NEXT:    movw %cx, (%rsi)
 ; X64-NEXT:    shll $13, %eax
-; X64-NEXT:    movswl %ax, %edi
-; X64-NEXT:    shrl $3, %edi
-; X64-NEXT:    cmpw %di, %cx
-; X64-NEXT:    cmovnel %edx, %eax
 ; X64-NEXT:    movw %ax, (%rsi)
 ; X64-NEXT:    retq
   %ashr = ashr i8 %x, 5
@@ -88,24 +70,9 @@ define void @computeNumSignBits_shl_zext_vec_1(<2 x i8> %x, ptr %p) nounwind {
 ; X64-NEXT:    movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
 ; X64-NEXT:    pxor %xmm1, %xmm0
 ; X64-NEXT:    psubb %xmm1, %xmm0
-; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X64-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1024,4096,u,u,u,u,u,u]
-; X64-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-NEXT:    pand %xmm0, %xmm2
-; X64-NEXT:    pcmpgtw %xmm0, %xmm1
-; X64-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-NEXT:    por %xmm2, %xmm1
-; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    paddw %xmm0, %xmm2
-; X64-NEXT:    movdqa %xmm2, %xmm3
-; X64-NEXT:    psraw $1, %xmm3
-; X64-NEXT:    pcmpeqw %xmm0, %xmm3
-; X64-NEXT:    movdqa %xmm3, %xmm0
-; X64-NEXT:    pandn %xmm1, %xmm0
-; X64-NEXT:    pand %xmm2, %xmm3
-; X64-NEXT:    por %xmm0, %xmm3
-; X64-NEXT:    movd %xmm3, (%rdi)
+; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2048,8192,u,u,u,u,u,u]
+; X64-NEXT:    movd %xmm0, (%rdi)
 ; X64-NEXT:    retq
   %ashr = ashr <2 x i8> %x, <i8 5, i8 5>
   %zext = zext <2 x i8> %ashr to <2 x i16>
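
For the vector test above, the per-lane shift amounts are not visible in
the quoted diff, but the pmullw constants (2048 = 1 << 11 and
8192 = 1 << 13) suggest shifts of 11 and 13. Under that assumption, the
range-based logic works out as follows (again an illustrative
walkthrough, not part of the commit):

    %shl = shl <2 x i16> %zext, <i16 11, i16 13>  ; assumed shift amounts
    ; SizeDifference = 8, MinShAmt = 11, MaxShAmt = 13
    ; 8 <= 11, so the pre-shift value has 8 + 6 = 14 sign bits
    ; 13 < 14, so 14 - 13 = 1 sign bit is known for each lane

Because the pre-shift sign-bit count (14) exceeds both shift amounts,
the shifts provably never discard all sign bits, which is presumably
what allows the saturation sequence in the old output to collapse into
a plain pmullw.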

More information about the llvm-commits mailing list