[llvm] 8421756 - [X86] Add additional test cases for NOT(AND(SRL(X,Y),1))/AND(SRL(NOT(X),Y),1) -> SETCC(BT(X,Y))

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 4 02:32:09 PDT 2022


Author: Simon Pilgrim
Date: 2022-04-04T10:29:33+01:00
New Revision: 842175676c6cbf740896567f22d81206d1e393c7

URL: https://github.com/llvm/llvm-project/commit/842175676c6cbf740896567f22d81206d1e393c7
DIFF: https://github.com/llvm/llvm-project/commit/842175676c6cbf740896567f22d81206d1e393c7.diff

LOG: [X86] Add additional test cases for NOT(AND(SRL(X,Y),1))/AND(SRL(NOT(X),Y),1) -> SETCC(BT(X,Y))

As suggested in post review on D122891

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/setcc.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/setcc.ll b/llvm/test/CodeGen/X86/setcc.ll
index eede7310af870..f538069ae2282 100644
--- a/llvm/test/CodeGen/X86/setcc.ll
+++ b/llvm/test/CodeGen/X86/setcc.ll
@@ -202,6 +202,83 @@ define i64 @t9(i32 %0, i32 %1) {
   ret i64 %6
 }
 
+define i32 @t10(i32 %0, i32 %1) {
+; X86-LABEL: t10:
+; X86:       ## %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl %cl, %eax
+; X86-NEXT:    notl %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: t10:
+; X64:       ## %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    notl %eax
+; X64-NEXT:    andl $1, %eax
+; X64-NEXT:    retq
+  %3 = lshr i32 %0, %1
+  %4 = and i32 %3, 1
+  %5 = xor i32 %4, 1
+  ret i32 %5
+}
+
+define i32 @t11(i32 %0, i32 %1) {
+; X86-LABEL: t11:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    notl %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    btl %edx, %ecx
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: t11:
+; X64:       ## %bb.0:
+; X64-NEXT:    notl %edi
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    btl %esi, %edi
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %3 = xor i32 %0, -1
+  %4 = lshr i32 %3, %1
+  %5 = and i32 %4, 1
+  ret i32 %5
+}
+
+define i32 @t12(i32 %0, i32 %1) {
+; X86-LABEL: t12:
+; X86:       ## %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    notl %eax
+; X86-NEXT:    shrl %cl, %eax
+; X86-NEXT:    notl %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: t12:
+; X64:       ## %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    notl %eax
+; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    notl %eax
+; X64-NEXT:    andl $1, %eax
+; X64-NEXT:    retq
+  %3 = xor i32 %0, -1
+  %4 = lshr i32 %3, %1
+  %5 = and i32 %4, 1
+  %6 = xor i32 %5, 1
+  ret i32 %6
+}
+
 define i16 @shift_and(i16 %a) {
 ; X86-LABEL: shift_and:
 ; X86:       ## %bb.0:


        


More information about the llvm-commits mailing list