[llvm] a5f637b - [X86] Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y))

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 1 08:08:22 PDT 2022


Author: Simon Pilgrim
Date: 2022-04-01T16:07:56+01:00
New Revision: a5f637bcbb7d1e08ce637f113fc117c3f4b2b110

URL: https://github.com/llvm/llvm-project/commit/a5f637bcbb7d1e08ce637f113fc117c3f4b2b110
DIFF: https://github.com/llvm/llvm-project/commit/a5f637bcbb7d1e08ce637f113fc117c3f4b2b110.diff

LOG: [X86] Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y))

As noticed on PR39174, if we're extracting a single bit at a non-constant index, try to use BT+SETCC instead; this avoids shuffling the shift amount into the ECX register and using slow x86 variable shift ops.

Differential Revision: https://reviews.llvm.org/D122891
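
For illustration (an editorial sketch, not part of the commit), a minimal C++ source pattern that lowers to AND(SRL(X,Y),1) with a variable bit index looks like the function below; with this fold, x86 codegen can now emit btl+setb instead of a variable shrl followed by andl. The function name is hypothetical:

  // Return bit Y of X, where Y is not a compile-time constant, so the
  // selection DAG contains AND(SRL(X,Y),1) with a non-constant shift amount.
  bool extract_bit(unsigned X, unsigned Y) {
    return (X >> Y) & 1u;
  }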

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/setcc.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1a4be3be34199..3661de89e8070 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -47324,6 +47324,19 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
   if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
     return R;
 
+  // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant
+  // avoids slow variable shift (moving shift amount to ECX etc.)
+  if (isOneConstant(N1) && N0->hasOneUse()) {
+    SDValue Src = N0;
+    while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
+            Src.getOpcode() == ISD::TRUNCATE) &&
+           Src.getOperand(0)->hasOneUse())
+      Src = Src.getOperand(0);
+    if (Src.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(Src.getOperand(1)))
+      if (SDValue BT = getBT(Src.getOperand(0), Src.getOperand(1), dl, DAG))
+        return getSETCC(X86::COND_B, BT, dl, DAG);
+  }
+
   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
     // Attempt to recursively combine a bitmask AND with shuffles.
     SDValue Op(N, 0);

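The combine above only fires when the AND mask is exactly one, the operand chain has a single use, and the shift amount is not a constant (an immediate shift is already cheap, so BT gains nothing there); single-use ZERO_EXTEND/TRUNCATE nodes between the AND and the SRL are looked through. The rewrite is sound because BT copies the selected bit into CF, and SETCC with COND_B (setb) materializes CF, giving the same value as (X >> Y) & 1. A quick standalone check of that equivalence (plain C++, not LLVM API; names are illustrative):

  #include <cassert>
  #include <cstdint>

  // Bit extraction via shift-and-mask, as in the original codegen.
  static bool via_shift(uint32_t X, uint32_t Y) { return (X >> Y) & 1u; }

  // Bit extraction via a mask test, which is what BT/SETB compute:
  // BT sets CF to the value of bit Y of X, and SETB materializes CF.
  static bool via_bit_test(uint32_t X, uint32_t Y) { return (X & (1u << Y)) != 0u; }

  int main() {
    const uint32_t Xs[] = {0u, 19u, 0x80000000u, 0xFFFFFFFFu};
    for (uint32_t X : Xs)
      for (uint32_t Y = 0; Y < 32; ++Y)
        assert(via_shift(X, Y) == via_bit_test(X, Y));
    return 0;
  }
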
diff  --git a/llvm/test/CodeGen/X86/setcc.ll b/llvm/test/CodeGen/X86/setcc.ll
index 57431887f58c6..229632b25dcf5 100644
--- a/llvm/test/CodeGen/X86/setcc.ll
+++ b/llvm/test/CodeGen/X86/setcc.ll
@@ -139,19 +139,17 @@ define zeroext i1 @t6(i32 %a) #0 {
 define zeroext i1 @t7(i32 %0) {
 ; X86-LABEL: t7:
 ; X86:       ## %bb.0:
-; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-NEXT:    movb $19, %al
-; X86-NEXT:    shrb %cl, %al
-; X86-NEXT:    andb $1, %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $19, %ecx
+; X86-NEXT:    btl %eax, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t7:
 ; X64:       ## %bb.0:
-; X64-NEXT:    movl %edi, %ecx
-; X64-NEXT:    movb $19, %al
-; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    shrb %cl, %al
-; X64-NEXT:    andb $1, %al
+; X64-NEXT:    movl $19, %eax
+; X64-NEXT:    btl %edi, %eax
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %2 = trunc i32 %0 to i5
   %3 = lshr i5 -13, %2
@@ -163,20 +161,16 @@ define zeroext i1 @t7(i32 %0) {
 define zeroext i1 @t8(i8 %0, i8 %1) {
 ; X86-LABEL: t8:
 ; X86:       ## %bb.0:
-; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    shrb %cl, %al
-; X86-NEXT:    andb $1, %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    btl %eax, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t8:
 ; X64:       ## %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    shrb %cl, %al
-; X64-NEXT:    andb $1, %al
-; X64-NEXT:    ## kill: def $al killed $al killed $eax
+; X64-NEXT:    btl %esi, %edi
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %3 = lshr i8 %0, %1
   %4 = and i8 %3, 1


        

