[llvm] e29c439 - Add tests for folding `(and/or (icmp eq/ne A, Pow2), (icmp eq/ne A, -Pow2))`; NFC

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 14 16:59:19 PST 2023


Author: Noah Goldstein
Date: 2023-02-14T18:59:03-06:00
New Revision: e29c4393236fae95c57b9eb631a806ff47dc38cd

URL: https://github.com/llvm/llvm-project/commit/e29c4393236fae95c57b9eb631a806ff47dc38cd
DIFF: https://github.com/llvm/llvm-project/commit/e29c4393236fae95c57b9eb631a806ff47dc38cd.diff

LOG: Add tests for folding `(and/or (icmp eq/ne A, Pow2), (icmp eq/ne A, -Pow2))`; NFC

Differential Revision: https://reviews.llvm.org/D142343
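
For context, here is the scalar pattern these tests exercise, written as
standalone LLVM IR. This is a minimal sketch: the @src/@tgt names are
illustrative, and the abs-based target form is an assumption suggested by
the abs baselines in the test file, not something this tests-only commit
implements.

  ; For a power of two (here 32): (A == 32) | (A == -32)
  define i1 @src(i32 %A) {
    %c1 = icmp eq i32 %A, 32
    %c2 = icmp eq i32 %A, -32
    %r = or i1 %c1, %c2
    ret i1 %r
  }

  ; Candidate folded form: abs(A) == 32 holds exactly for A = +/-32.
  ; int_min_is_poison is false, so A = INT_MIN stays well defined (abs
  ; returns INT_MIN there, which still compares unequal to 32).
  define i1 @tgt(i32 %A) {
    %a = call i32 @llvm.abs.i32(i32 %A, i1 false)
    %r = icmp eq i32 %a, 32
    ret i1 %r
  }

  declare i32 @llvm.abs.i32(i32, i1)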

Added: 
    llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll b/llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
new file mode 100644
index 000000000000..38ea9aa97d5c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
@@ -0,0 +1,289 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X86
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=X64
+
+declare i64 @llvm.abs.i64(i64, i1)
+declare <2 x i64> @llvm.abs.2xi64(<2 x i64>, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i8 @llvm.abs.i8(i8, i1)
+
+define i1 @eq_pow_or(i32 %0) nounwind {
+; X86-LABEL: eq_pow_or:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $32, %eax
+; X86-NEXT:    sete %cl
+; X86-NEXT:    cmpl $-32, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: eq_pow_or:
+; X64:       # %bb.0:
+; X64-NEXT:    cmpl $32, %edi
+; X64-NEXT:    sete %cl
+; X64-NEXT:    cmpl $-32, %edi
+; X64-NEXT:    sete %al
+; X64-NEXT:    orb %cl, %al
+; X64-NEXT:    retq
+  %2 = icmp eq i32 %0, 32
+  %3 = icmp eq i32 %0, -32
+  %4 = or i1 %2, %3
+  ret i1 %4
+}
+
+define i1 @ne_pow_and(i8 %0) nounwind {
+; X86-LABEL: ne_pow_and:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpb $16, %al
+; X86-NEXT:    setne %cl
+; X86-NEXT:    cmpb $-16, %al
+; X86-NEXT:    setne %al
+; X86-NEXT:    andb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: ne_pow_and:
+; X64:       # %bb.0:
+; X64-NEXT:    cmpb $16, %dil
+; X64-NEXT:    setne %cl
+; X64-NEXT:    cmpb $-16, %dil
+; X64-NEXT:    setne %al
+; X64-NEXT:    andb %cl, %al
+; X64-NEXT:    retq
+  %2 = icmp ne i8 %0, 16
+  %3 = icmp ne i8 %0, -16
+  %4 = and i1 %2, %3
+  ret i1 %4
+}
+
+define i1 @eq_pow_mismatch_or(i32 %0) nounwind {
+; X86-LABEL: eq_pow_mismatch_or:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $16, %eax
+; X86-NEXT:    sete %cl
+; X86-NEXT:    cmpl $-32, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: eq_pow_mismatch_or:
+; X64:       # %bb.0:
+; X64-NEXT:    cmpl $16, %edi
+; X64-NEXT:    sete %cl
+; X64-NEXT:    cmpl $-32, %edi
+; X64-NEXT:    sete %al
+; X64-NEXT:    orb %cl, %al
+; X64-NEXT:    retq
+  %2 = icmp eq i32 %0, 16
+  %3 = icmp eq i32 %0, -32
+  %4 = or i1 %2, %3
+  ret i1 %4
+}
+
+define i1 @ne_non_pow_and(i8 %0) nounwind {
+; X86-LABEL: ne_non_pow_and:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpb $17, %al
+; X86-NEXT:    setne %cl
+; X86-NEXT:    cmpb $-17, %al
+; X86-NEXT:    setne %al
+; X86-NEXT:    andb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: ne_non_pow_and:
+; X64:       # %bb.0:
+; X64-NEXT:    cmpb $17, %dil
+; X64-NEXT:    setne %cl
+; X64-NEXT:    cmpb $-17, %dil
+; X64-NEXT:    setne %al
+; X64-NEXT:    andb %cl, %al
+; X64-NEXT:    retq
+  %2 = icmp ne i8 %0, 17
+  %3 = icmp ne i8 %0, -17
+  %4 = and i1 %2, %3
+  ret i1 %4
+}
+
+define i1 @ne_pow_or(i32 %0) nounwind {
+; X86-LABEL: ne_pow_or:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    xorl $32, %ecx
+; X86-NEXT:    xorl $-32, %eax
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: ne_pow_or:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $32, %eax
+; X64-NEXT:    xorl $-32, %edi
+; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %2 = icmp ne i32 %0, 32
+  %3 = icmp ne i32 %0, -32
+  %4 = or i1 %2, %3
+  ret i1 %4
+}
+
+define i1 @eq_pow_and(i8 %0) nounwind {
+; X86-LABEL: eq_pow_and:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    xorb $16, %cl
+; X86-NEXT:    xorb $-16, %al
+; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: eq_pow_and:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorb $16, %al
+; X64-NEXT:    xorb $-16, %dil
+; X64-NEXT:    orb %al, %dil
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %2 = icmp eq i8 %0, 16
+  %3 = icmp eq i8 %0, -16
+  %4 = and i1 %2, %3
+  ret i1 %4
+}
+
+define i1 @abs_eq_pow2(i32 %0) nounwind {
+; X86-LABEL: abs_eq_pow2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    sarl $31, %ecx
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    subl %ecx, %eax
+; X86-NEXT:    cmpl $4, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: abs_eq_pow2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovsl %edi, %eax
+; X64-NEXT:    cmpl $4, %eax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %2 = tail call i32 @llvm.abs.i32(i32 %0, i1 true)
+  %3 = icmp eq i32 %2, 4
+  ret i1 %3
+}
+
+define i1 @abs_ne_pow2(i64 %0) nounwind {
+; X86-LABEL: abs_ne_pow2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    sarl $31, %ecx
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ecx, %edx
+; X86-NEXT:    subl %ecx, %edx
+; X86-NEXT:    sbbl %ecx, %eax
+; X86-NEXT:    xorl $2, %edx
+; X86-NEXT:    orl %eax, %edx
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: abs_ne_pow2:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovsq %rdi, %rax
+; X64-NEXT:    cmpq $2, %rax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %2 = tail call i64 @llvm.abs.i64(i64 %0, i1 true)
+  %3 = icmp ne i64 %2, 2
+  ret i1 %3
+}
+
+define i1 @abs_ne_nonpow2(i16 %0) nounwind {
+; X86-LABEL: abs_ne_nonpow2:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    sarl $15, %ecx
+; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    subl %ecx, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $57344, %eax # imm = 0xE000
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: abs_ne_nonpow2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    negw %ax
+; X64-NEXT:    cmovsw %di, %ax
+; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    cmpl $57344, %eax # imm = 0xE000
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %2 = tail call i16 @llvm.abs.i16(i16 %0, i1 true)
+  %3 = icmp ne i16 %2, -8192
+  ret i1 %3
+}
+
+define <2 x i1> @abs_ne_vec(<2 x i64> %0) nounwind {
+; X86-LABEL: abs_ne_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    sarl $31, %esi
+; X86-NEXT:    xorl %esi, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %esi, %edx
+; X86-NEXT:    subl %esi, %edx
+; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    sarl $31, %esi
+; X86-NEXT:    xorl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    xorl %esi, %edi
+; X86-NEXT:    subl %esi, %edi
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    xorl $8, %edi
+; X86-NEXT:    orl %eax, %edi
+; X86-NEXT:    setne %al
+; X86-NEXT:    xorl $8, %edx
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    setne %dl
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+;
+; X64-LABEL: abs_ne_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrad $31, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    psubq %xmm1, %xmm0
+; X64-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
+; X64-NEXT:    pand %xmm1, %xmm0
+; X64-NEXT:    pcmpeqd %xmm1, %xmm1
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
+  %2 = tail call <2 x i64> @llvm.abs.2xi64(<2 x i64> %0, i1 true)
+  %3 = icmp ne <2 x i64> %2, <i64 8, i64 8>
+  ret <2 x i1> %3
+}
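
The CHECK lines above were generated with update_llc_test_checks.py (see
the NOTE at the top of the file) and can be regenerated after a codegen
change. The paths below assume a typical checkout with a ./build tree;
adjust them for your setup:

  llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
  build/bin/llvm-lit -v llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll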