[llvm] 596af14 - [X86] setcc.ll - add PR39174 test case and i686 coverage

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Thu Mar 31 13:29:23 PDT 2022


Author: Simon Pilgrim
Date: 2022-03-31T21:29:12+01:00
New Revision: 596af141b24c98886673c56b1068bb293f5d1793

URL: https://github.com/llvm/llvm-project/commit/596af141b24c98886673c56b1068bb293f5d1793
DIFF: https://github.com/llvm/llvm-project/commit/596af141b24c98886673c56b1068bb293f5d1793.diff

LOG: [X86] setcc.ll - add PR39174 test case and i686 coverage
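
For context, the new t7 case reduces to testing whether a variable-indexed bit
of the 5-bit constant -13 (0b10011, i.e. 19 unsigned) is set, and t8 does the
same with a fully variable i8 value. A rough C equivalent of the t7 pattern,
as a sketch of my own (the function name and the "& 31" masking, which mirrors
the i32 -> i5 trunc in the IR, are illustrative assumptions, not taken from the
PR or the test file):

    #include <stdbool.h>

    /* Shift the constant right by a variable amount and keep the low bit:
       "is bit x of 19 set?". Masking with 31 models the i32 -> i5 trunc. */
    bool bit_of_19_is_set(unsigned x) {
        return (19u >> (x & 31)) & 1u;
    }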

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/setcc.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/setcc.ll b/llvm/test/CodeGen/X86/setcc.ll
index 50f762de251bd..57431887f58c6 100644
--- a/llvm/test/CodeGen/X86/setcc.ll
+++ b/llvm/test/CodeGen/X86/setcc.ll
@@ -1,41 +1,69 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=i686-apple-darwin | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s --check-prefixes=X64
 ; rdar://7329206
 
 define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
-; CHECK-LABEL: t1:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    cmpw $27, %di
-; CHECK-NEXT:    setae %al
-; CHECK-NEXT:    shll $5, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: t1:
+; X86:       ## %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpw $27, {{[0-9]+}}(%esp)
+; X86-NEXT:    setae %al
+; X86-NEXT:    shll $5, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: t1:
+; X64:       ## %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpw $27, %di
+; X64-NEXT:    setae %al
+; X64-NEXT:    shll $5, %eax
+; X64-NEXT:    retq
   %t0 = icmp ugt i16 %x, 26
   %if = select i1 %t0, i16 32, i16 0
   ret i16 %if
 }
 
 define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
-; CHECK-LABEL: t2:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    cmpw $26, %di
-; CHECK-NEXT:    setb %al
-; CHECK-NEXT:    shll $5, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: t2:
+; X86:       ## %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpw $26, {{[0-9]+}}(%esp)
+; X86-NEXT:    setb %al
+; X86-NEXT:    shll $5, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: t2:
+; X64:       ## %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpw $26, %di
+; X64-NEXT:    setb %al
+; X64-NEXT:    shll $5, %eax
+; X64-NEXT:    retq
   %t0 = icmp ult i16 %x, 26
   %if = select i1 %t0, i16 32, i16 0
   ret i16 %if
 }
 
 define i64 @t3(i64 %x) nounwind readnone ssp {
-; CHECK-LABEL: t3:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    cmpq $18, %rdi
-; CHECK-NEXT:    setb %al
-; CHECK-NEXT:    shlq $6, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: t3:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $18, {{[0-9]+}}(%esp)
+; X86-NEXT:    sbbl $0, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    shll $6, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: t3:
+; X64:       ## %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq $18, %rdi
+; X64-NEXT:    setb %al
+; X64-NEXT:    shlq $6, %rax
+; X64-NEXT:    retq
   %t0 = icmp ult i64 %x, 18
   %if = select i1 %t0, i64 64, i64 0
   ret i64 %if
@@ -44,14 +72,23 @@ define i64 @t3(i64 %x) nounwind readnone ssp {
 @v4 = common global i32 0, align 4
 
 define i32 @t4(i32 %a) {
-; CHECK-LABEL: t4:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movq _v4@GOTPCREL(%rip), %rcx
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    cmpl $1, (%rcx)
-; CHECK-NEXT:    adcw $1, %ax
-; CHECK-NEXT:    shll $16, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: t4:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl L_v4$non_lazy_ptr, %ecx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpl $1, (%ecx)
+; X86-NEXT:    adcw $1, %ax
+; X86-NEXT:    shll $16, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: t4:
+; X64:       ## %bb.0:
+; X64-NEXT:    movq _v4@GOTPCREL(%rip), %rcx
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpl $1, (%rcx)
+; X64-NEXT:    adcw $1, %ax
+; X64-NEXT:    shll $16, %eax
+; X64-NEXT:    retq
   %t0 = load i32, i32* @v4, align 4
   %not.tobool = icmp eq i32 %t0, 0
   %conv.i = sext i1 %not.tobool to i16
@@ -63,11 +100,17 @@ define i32 @t4(i32 %a) {
 }
 
 define i8 @t5(i32 %a) #0 {
-; CHECK-LABEL: t5:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    testl %edi, %edi
-; CHECK-NEXT:    setns %al
-; CHECK-NEXT:    retq
+; X86-LABEL: t5:
+; X86:       ## %bb.0:
+; X86-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    setns %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: t5:
+; X64:       ## %bb.0:
+; X64-NEXT:    testl %edi, %edi
+; X64-NEXT:    setns %al
+; X64-NEXT:    retq
   %.lobit = lshr i32 %a, 31
   %trunc = trunc i32 %.lobit to i8
   %.not = xor i8 %trunc, 1
@@ -75,25 +118,89 @@ define i8 @t5(i32 %a) #0 {
 }
 
 define zeroext i1 @t6(i32 %a) #0 {
-; CHECK-LABEL: t6:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    testl %edi, %edi
-; CHECK-NEXT:    setns %al
-; CHECK-NEXT:    retq
+; X86-LABEL: t6:
+; X86:       ## %bb.0:
+; X86-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    setns %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: t6:
+; X64:       ## %bb.0:
+; X64-NEXT:    testl %edi, %edi
+; X64-NEXT:    setns %al
+; X64-NEXT:    retq
   %.lobit = lshr i32 %a, 31
   %trunc = trunc i32 %.lobit to i1
   %.not = xor i1 %trunc, 1
   ret i1 %.not
 }
 
+; PR39174
+define zeroext i1 @t7(i32 %0) {
+; X86-LABEL: t7:
+; X86:       ## %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb $19, %al
+; X86-NEXT:    shrb %cl, %al
+; X86-NEXT:    andb $1, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: t7:
+; X64:       ## %bb.0:
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    movb $19, %al
+; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrb %cl, %al
+; X64-NEXT:    andb $1, %al
+; X64-NEXT:    retq
+  %2 = trunc i32 %0 to i5
+  %3 = lshr i5 -13, %2
+  %4 = and i5 %3, 1
+  %5 = icmp ne i5 %4, 0
+  ret i1 %5
+}
+
+define zeroext i1 @t8(i8 %0, i8 %1) {
+; X86-LABEL: t8:
+; X86:       ## %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    shrb %cl, %al
+; X86-NEXT:    andb $1, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: t8:
+; X64:       ## %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    ## kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrb %cl, %al
+; X64-NEXT:    andb $1, %al
+; X64-NEXT:    ## kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+  %3 = lshr i8 %0, %1
+  %4 = and i8 %3, 1
+  %5 = icmp ne i8 %4, 0
+  ret i1 %5
+}
+
 define i16 @shift_and(i16 %a) {
-; CHECK-LABEL: shift_and:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    shrl $10, %eax
-; CHECK-NEXT:    andl $1, %eax
-; CHECK-NEXT:    ## kill: def $ax killed $ax killed $eax
-; CHECK-NEXT:    retq
+; X86-LABEL: shift_and:
+; X86:       ## %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    andb $4, %al
+; X86-NEXT:    shrb $2, %al
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    ## kill: def $ax killed $ax killed $eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: shift_and:
+; X64:       ## %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    shrl $10, %eax
+; X64-NEXT:    andl $1, %eax
+; X64-NEXT:    ## kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
   %and = and i16 %a, 1024
   %cmp = icmp ne i16 %and, 0
   %conv = zext i1 %cmp to i16


        


More information about the llvm-commits mailing list