[llvm] 5dd5859 - [X86] Add PR20841 test cases showing failure to reuse ZF from XADD ops

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sun Apr 25 03:50:28 PDT 2021


Author: Simon Pilgrim
Date: 2021-04-25T11:50:18+01:00
New Revision: 5dd5859c42dab143f21fd4ca4204c6084a929054

URL: https://github.com/llvm/llvm-project/commit/5dd5859c42dab143f21fd4ca4204c6084a929054
DIFF: https://github.com/llvm/llvm-project/commit/5dd5859c42dab143f21fd4ca4204c6084a929054.diff

LOG: [X86] Add PR20841 test cases showing failure to reuse ZF from XADD ops
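
XADD stores the old memory value into its register operand but sets EFLAGS from
the sum it writes back, so when the value being compared against zero matches
what the flags already describe, the trailing TEST is redundant. A minimal
sketch of that kind of flag reuse, assuming a decrement whose compared constant
lines up with the addend (illustrative only; the registers and the exact fold
are not taken from this patch):

    ; IR: fetch-and-add of -1, checking whether the decremented value hit zero
    %old = atomicrmw add i32* %p, i32 -1 seq_cst
    %cmp = icmp eq i32 %old, 1          ; old == 1  <=>  old + (-1) == 0

    # hoped-for x86-64 codegen: reuse ZF from the locked op, no separate test
    movl $-1, %ecx
    lock xaddl %ecx, (%rdi)             # ZF = ((*rdi + -1) == 0)
    sete %al
    retq

The tests below instead compare the value returned by the atomicrmw (the
pre-add value) directly against zero, and the current codegen keeps the XADD
followed by a separate TEST/OR before the SETCC.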

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/atomic-flags.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/atomic-flags.ll b/llvm/test/CodeGen/X86/atomic-flags.ll
index ba784bc1f408..dfd916f81216 100644
--- a/llvm/test/CodeGen/X86/atomic-flags.ll
+++ b/llvm/test/CodeGen/X86/atomic-flags.ll
@@ -123,3 +123,107 @@ L3:
 L4:
   ret i32 4
 }
+
+; FIXME: PR20841 - ensure we reuse the ZF flag from XADD for compares with zero.
+
+define zeroext i1 @xadd_cmp0_i64(i64* %x) nounwind {
+; X64-LABEL: xadd_cmp0_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $1, %eax
+; X64-NEXT:    lock xaddq %rax, (%rdi)
+; X64-NEXT:    testq %rax, %rax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+;
+; X86-LABEL: xadd_cmp0_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl (%esi), %eax
+; X86-NEXT:    movl 4(%esi), %edx
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB2_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    movl %eax, %ebx
+; X86-NEXT:    addl $1, %ebx
+; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    adcl $0, %ecx
+; X86-NEXT:    lock cmpxchg8b (%esi)
+; X86-NEXT:    jne .LBB2_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+  %add = atomicrmw add i64* %x, i64 1 seq_cst
+  %cmp = icmp eq i64 %add, 0
+  ret i1 %cmp
+}
+
+define zeroext i1 @xadd_cmp0_i32(i32* %x) nounwind {
+; X64-LABEL: xadd_cmp0_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $1, %eax
+; X64-NEXT:    lock xaddl %eax, (%rdi)
+; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+;
+; X86-LABEL: xadd_cmp0_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $1, %ecx
+; X86-NEXT:    lock xaddl %ecx, (%eax)
+; X86-NEXT:    testl %ecx, %ecx
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+  %add = atomicrmw add i32* %x, i32 1 seq_cst
+  %cmp = icmp ne i32 %add, 0
+  ret i1 %cmp
+}
+
+define zeroext i1 @xadd_cmp0_i16(i16* %x) nounwind {
+; X64-LABEL: xadd_cmp0_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movw $1, %ax
+; X64-NEXT:    lock xaddw %ax, (%rdi)
+; X64-NEXT:    testw %ax, %ax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+;
+; X86-LABEL: xadd_cmp0_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movw $1, %cx
+; X86-NEXT:    lock xaddw %cx, (%eax)
+; X86-NEXT:    testw %cx, %cx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+  %add = atomicrmw add i16* %x, i16 1 seq_cst
+  %cmp = icmp eq i16 %add, 0
+  ret i1 %cmp
+}
+
+define zeroext i1 @xadd_cmp0_i8(i8* %x) nounwind {
+; X64-LABEL: xadd_cmp0_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movb $1, %al
+; X64-NEXT:    lock xaddb %al, (%rdi)
+; X64-NEXT:    testb %al, %al
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+;
+; X86-LABEL: xadd_cmp0_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb $1, %cl
+; X86-NEXT:    lock xaddb %cl, (%eax)
+; X86-NEXT:    testb %cl, %cl
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+  %add = atomicrmw add i8* %x, i8 1 seq_cst
+  %cmp = icmp ne i8 %add, 0
+  ret i1 %cmp
+}