[llvm] 4ead589 - [X86] add-and-not.ll - add 32-bit test coverage

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 15 08:13:14 PDT 2023


Author: Simon Pilgrim
Date: 2023-03-15T15:13:02Z
New Revision: 4ead58914cc3f04f2dcc066d542ff220831dcc70

URL: https://github.com/llvm/llvm-project/commit/4ead58914cc3f04f2dcc066d542ff220831dcc70
DIFF: https://github.com/llvm/llvm-project/commit/4ead58914cc3f04f2dcc066d542ff220831dcc70.diff

LOG: [X86] add-and-not.ll - add 32-bit test coverage
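
For reference, the file exercises the fold of "add x, (and (not x), y)" into a plain "or x, y". A minimal sketch of the pattern under test (mirroring the add_and_xor function in the diff below):

  %xor = xor i8 %x, -1
  %and = and i8 %xor, %y
  %add = add i8 %and, %x
  ret i8 %add

With the new i686 RUN line this is expected to lower to a single orb, just as the existing x86_64 RUN line lowers it to orl, per the updated check prefixes.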

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/add-and-not.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/add-and-not.ll b/llvm/test/CodeGen/X86/add-and-not.ll
index 99bfb94e689e2..d4dc732ce782c 100644
--- a/llvm/test/CodeGen/X86/add-and-not.ll
+++ b/llvm/test/CodeGen/X86/add-and-not.ll
@@ -1,15 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefixes=X64
 
 declare void @use(i8)
 
 define i8 @add_and_xor(i8 %x, i8 %y) {
-; CHECK-LABEL: add_and_xor:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    orl %esi, %eax
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %xor = xor i8 %x, -1
   %and = and i8 %xor, %y
   %add = add i8 %and, %x
@@ -17,13 +24,22 @@ define i8 @add_and_xor(i8 %x, i8 %y) {
 }
 
 define i8 @add_and_xor_wrong_const(i8 %x, i8 %y) {
-; CHECK-LABEL: add_and_xor_wrong_const:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    xorb $-2, %al
-; CHECK-NEXT:    andb %sil, %al
-; CHECK-NEXT:    addb %dil, %al
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_wrong_const:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    xorb $-2, %al
+; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    addb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_wrong_const:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorb $-2, %al
+; X64-NEXT:    andb %sil, %al
+; X64-NEXT:    addb %dil, %al
+; X64-NEXT:    retq
   %xor = xor i8 %x, -2
   %and = and i8 %xor, %y
   %add = add i8 %and, %x
@@ -31,15 +47,23 @@ define i8 @add_and_xor_wrong_const(i8 %x, i8 %y) {
 }
 
 define i8 @add_and_xor_wrong_op(i8 %x, i8 %y, i8 %z) {
-; CHECK-LABEL: add_and_xor_wrong_op:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $edx killed $edx def $rdx
-; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT:    notb %dl
-; CHECK-NEXT:    andb %sil, %dl
-; CHECK-NEXT:    leal (%rdx,%rdi), %eax
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_wrong_op:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    notb %al
+; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    addb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_wrong_op:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-NEXT:    notb %dl
+; X64-NEXT:    andb %sil, %dl
+; X64-NEXT:    leal (%rdx,%rdi), %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %xor = xor i8 %z, -1
   %and = and i8 %xor, %y
   %add = add i8 %and, %x
@@ -47,12 +71,18 @@ define i8 @add_and_xor_wrong_op(i8 %x, i8 %y, i8 %z) {
 }
 
 define i8 @add_and_xor_commuted1(i8 %x, i8 %y) {
-; CHECK-LABEL: add_and_xor_commuted1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    orl %esi, %eax
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_commuted1:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_commuted1:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %xor = xor i8 %x, -1
   %and = and i8 %y, %xor
   %add = add i8 %and, %x
@@ -60,12 +90,18 @@ define i8 @add_and_xor_commuted1(i8 %x, i8 %y) {
 }
 
 define i8 @add_and_xor_commuted2(i8 %x, i8 %y) {
-; CHECK-LABEL: add_and_xor_commuted2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    orl %esi, %eax
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_commuted2:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_commuted2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %xor = xor i8 %x, -1
   %and = and i8 %xor, %y
   %add = add i8 %x, %and
@@ -73,12 +109,18 @@ define i8 @add_and_xor_commuted2(i8 %x, i8 %y) {
 }
 
 define i8 @add_and_xor_commuted3(i8 %x, i8 %y) {
-; CHECK-LABEL: add_and_xor_commuted3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    orl %esi, %eax
-; CHECK-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_commuted3:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_commuted3:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
   %xor = xor i8 %x, -1
   %and = and i8 %y, %xor
   %add = add i8 %x, %and
@@ -86,27 +128,47 @@ define i8 @add_and_xor_commuted3(i8 %x, i8 %y) {
 }
 
 define i8 @add_and_xor_extra_use(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: add_and_xor_extra_use:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushq %rbp
-; CHECK-NEXT:    pushq %r14
-; CHECK-NEXT:    pushq %rbx
-; CHECK-NEXT:    movl %esi, %ebx
-; CHECK-NEXT:    movl %edi, %ebp
-; CHECK-NEXT:    movl %ebp, %eax
-; CHECK-NEXT:    notb %al
-; CHECK-NEXT:    movzbl %al, %r14d
-; CHECK-NEXT:    movl %r14d, %edi
-; CHECK-NEXT:    callq use@PLT
-; CHECK-NEXT:    andb %bl, %r14b
-; CHECK-NEXT:    movzbl %r14b, %edi
-; CHECK-NEXT:    callq use@PLT
-; CHECK-NEXT:    orb %bpl, %bl
-; CHECK-NEXT:    movl %ebx, %eax
-; CHECK-NEXT:    popq %rbx
-; CHECK-NEXT:    popq %r14
-; CHECK-NEXT:    popq %rbp
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_extra_use:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %bh
+; X86-NEXT:    notb %bh
+; X86-NEXT:    movzbl %bh, %eax
+; X86-NEXT:    movl %eax, (%esp)
+; X86-NEXT:    calll use@PLT
+; X86-NEXT:    andb %bl, %bh
+; X86-NEXT:    movzbl %bh, %eax
+; X86-NEXT:    movl %eax, (%esp)
+; X86-NEXT:    calll use@PLT
+; X86-NEXT:    orb {{[0-9]+}}(%esp), %bl
+; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_extra_use:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:    pushq %r14
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movl %esi, %ebx
+; X64-NEXT:    movl %edi, %ebp
+; X64-NEXT:    movl %ebp, %eax
+; X64-NEXT:    notb %al
+; X64-NEXT:    movzbl %al, %r14d
+; X64-NEXT:    movl %r14d, %edi
+; X64-NEXT:    callq use@PLT
+; X64-NEXT:    andb %bl, %r14b
+; X64-NEXT:    movzbl %r14b, %edi
+; X64-NEXT:    callq use@PLT
+; X64-NEXT:    orb %bpl, %bl
+; X64-NEXT:    movl %ebx, %eax
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    popq %r14
+; X64-NEXT:    popq %rbp
+; X64-NEXT:    retq
   %xor = xor i8 %x, -1
   call void @use(i8 %xor)
   %and = and i8 %xor, %y
@@ -116,11 +178,18 @@ define i8 @add_and_xor_extra_use(i8 %x, i8 %y) nounwind {
 }
 
 define i64 @add_and_xor_const(i64 %x) {
-; CHECK-LABEL: add_and_xor_const:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    orq $1, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_const:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    orl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_const:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    orq $1, %rax
+; X64-NEXT:    retq
   %xor = xor i64 %x, -1
   %and = and i64 %xor, 1
   %add = add i64 %and, %x
@@ -128,12 +197,22 @@ define i64 @add_and_xor_const(i64 %x) {
 }
 
 define i64 @add_and_xor_const_wrong_op(i64 %x, i64 %y) {
-; CHECK-LABEL: add_and_xor_const_wrong_op:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    notl %esi
-; CHECK-NEXT:    andl $1, %esi
-; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_const_wrong_op:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    notl %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    adcl $0, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_const_wrong_op:
+; X64:       # %bb.0:
+; X64-NEXT:    notl %esi
+; X64-NEXT:    andl $1, %esi
+; X64-NEXT:    leaq (%rsi,%rdi), %rax
+; X64-NEXT:    retq
   %xor = xor i64 %y, -1
   %and = and i64 %xor, 1
   %add = add i64 %and, %x
@@ -141,11 +220,18 @@ define i64 @add_and_xor_const_wrong_op(i64 %x, i64 %y) {
 }
 
 define i64 @add_and_xor_const_explicit_trunc(i64 %x) {
-; CHECK-LABEL: add_and_xor_const_explicit_trunc:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    orq $1, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_const_explicit_trunc:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    orl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_const_explicit_trunc:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    orq $1, %rax
+; X64-NEXT:    retq
   %trunc = trunc i64 %x to i32
   %xor = xor i32 %trunc, -1
   %ext = sext i32 %xor to i64
@@ -155,15 +241,27 @@ define i64 @add_and_xor_const_explicit_trunc(i64 %x) {
 }
 
 define i64 @add_and_xor_const_explicit_trunc_wrong_mask(i64 %x) {
-; CHECK-LABEL: add_and_xor_const_explicit_trunc_wrong_mask:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    notl %eax
-; CHECK-NEXT:    movslq %eax, %rcx
-; CHECK-NEXT:    movabsq $4294967297, %rax # imm = 0x100000001
-; CHECK-NEXT:    andq %rcx, %rax
-; CHECK-NEXT:    addq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: add_and_xor_const_explicit_trunc_wrong_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    notl %eax
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    shrl $31, %edx
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_and_xor_const_explicit_trunc_wrong_mask:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    notl %eax
+; X64-NEXT:    movslq %eax, %rcx
+; X64-NEXT:    movabsq $4294967297, %rax # imm = 0x100000001
+; X64-NEXT:    andq %rcx, %rax
+; X64-NEXT:    addq %rdi, %rax
+; X64-NEXT:    retq
   %trunc = trunc i64 %x to i32
   %xor = xor i32 %trunc, -1
   %ext = sext i32 %xor to i64
@@ -173,11 +271,17 @@ define i64 @add_and_xor_const_explicit_trunc_wrong_mask(i64 %x) {
 }
 
 define ptr @gep_and_xor(ptr %a, i64 %m) {
-; CHECK-LABEL: gep_and_xor:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    orq %rsi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: gep_and_xor:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: gep_and_xor:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    orq %rsi, %rax
+; X64-NEXT:    retq
   %old = ptrtoint ptr %a to i64
   %old.not = and i64 %old, %m
   %offset = xor i64 %old.not, %m
@@ -186,11 +290,17 @@ define ptr @gep_and_xor(ptr %a, i64 %m) {
 }
 
 define ptr @gep_and_xor_const(ptr %a) {
-; CHECK-LABEL: gep_and_xor_const:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    orq $1, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: gep_and_xor_const:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: gep_and_xor_const:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    orq $1, %rax
+; X64-NEXT:    retq
   %old = ptrtoint ptr %a to i64
   %old.not = and i64 %old, 1
   %offset = xor i64 %old.not, 1
