[llvm] 3886985 - [X86] Add tests for `(atomicrmw xor p, Imm)`; NFC

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Sun May 7 17:12:20 PDT 2023


Author: Noah Goldstein
Date: 2023-05-07T19:11:52-05:00
New Revision: 38869852177ca263e29b4f437eb3610ace842924

URL: https://github.com/llvm/llvm-project/commit/38869852177ca263e29b4f437eb3610ace842924
DIFF: https://github.com/llvm/llvm-project/commit/38869852177ca263e29b4f437eb3610ace842924.diff

LOG: [X86] Add tests for `(atomicrmw xor p, Imm)`; NFC

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D149687
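
For context, each test in the new file exercises an atomicrmw xor of a constant
against memory, with the constant chosen as the sign bit of the operand width
(plus one non-sign-bit case). A minimal sketch of the pattern, lifted from the
i32 case in the diff below:

  define i32 @xor32_signbit_used(ptr %p) nounwind {
    %r = atomicrmw xor ptr %p, i32 2147483648 monotonic ; 0x80000000 = i32 sign bit
    ret i32 %r
  }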

Added: 
    llvm/test/CodeGen/X86/atomic-xor.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/atomic-xor.ll b/llvm/test/CodeGen/X86/atomic-xor.ll
new file mode 100644
index 000000000000..d0738356bb62
--- /dev/null
+++ b/llvm/test/CodeGen/X86/atomic-xor.ll
@@ -0,0 +1,234 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+
+define void @xor32_signbit_unused(ptr %p) nounwind {
+; X86-LABEL: xor32_signbit_unused:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    lock xorl $-2147483648, (%eax) # imm = 0x80000000
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor32_signbit_unused:
+; X64:       # %bb.0:
+; X64-NEXT:    lock xorl $-2147483648, (%rdi) # imm = 0x80000000
+; X64-NEXT:    retq
+  %r = atomicrmw xor ptr %p, i32 2147483648 monotonic
+  ret void
+}
+
+define i128 @xor128_signbit_used(ptr %p) nounwind {
+; X86-LABEL: xor128_signbit_used:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl 8(%ebp), %esi
+; X86-NEXT:    movl %esp, %eax
+; X86-NEXT:    pushl $-2147483648 # imm = 0x80000000
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl 12(%ebp)
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    calll __sync_fetch_and_xor_16
+; X86-NEXT:    addl $20, %esp
+; X86-NEXT:    movl (%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, 8(%esi)
+; X86-NEXT:    movl %edx, 12(%esi)
+; X86-NEXT:    movl %eax, (%esi)
+; X86-NEXT:    movl %ecx, 4(%esi)
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    leal -8(%ebp), %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: xor128_signbit_used:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    movabsq $-9223372036854775808, %rdx # imm = 0x8000000000000000
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    callq __sync_fetch_and_xor_16 at PLT
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+  %r = atomicrmw xor ptr %p, i128 170141183460469231731687303715884105728 monotonic
+  ret i128 %r
+}
+
+define i64 @xor64_signbit_used(ptr %p) nounwind {
+; X86-LABEL: xor64_signbit_used:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl (%esi), %eax
+; X86-NEXT:    movl 4(%esi), %edx
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB2_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    leal -2147483648(%edx), %ecx
+; X86-NEXT:    movl %eax, %ebx
+; X86-NEXT:    lock cmpxchg8b (%esi)
+; X86-NEXT:    jne .LBB2_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor64_signbit_used:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
+; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:  .LBB2_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    movq %rax, %rdx
+; X64-NEXT:    xorq %rcx, %rdx
+; X64-NEXT:    lock cmpxchgq %rdx, (%rdi)
+; X64-NEXT:    jne .LBB2_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+  %r = atomicrmw xor ptr %p, i64 9223372036854775808 monotonic
+  ret i64 %r
+}
+
+define i32 @xor32_signbit_used(ptr %p) nounwind {
+; X86-LABEL: xor32_signbit_used:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB3_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    leal -2147483648(%eax), %edx
+; X86-NEXT:    lock cmpxchgl %edx, (%ecx)
+; X86-NEXT:    jne .LBB3_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor32_signbit_used:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:  .LBB3_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    leal -2147483648(%rax), %ecx
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    lock cmpxchgl %ecx, (%rdi)
+; X64-NEXT:    # kill: def $eax killed $eax def $rax
+; X64-NEXT:    jne .LBB3_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    retq
+  %r = atomicrmw xor ptr %p, i32 2147483648 monotonic
+  ret i32 %r
+}
+
+define i16 @xor16_signbit_used(ptr %p) nounwind {
+; X86-LABEL: xor16_signbit_used:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzwl (%ecx), %eax
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB4_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    xorl $32768, %edx # imm = 0x8000
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    lock cmpxchgw %dx, (%ecx)
+; X86-NEXT:    # kill: def $ax killed $ax def $eax
+; X86-NEXT:    jne .LBB4_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor16_signbit_used:
+; X64:       # %bb.0:
+; X64-NEXT:    movzwl (%rdi), %eax
+; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:  .LBB4_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    xorl $32768, %ecx # imm = 0x8000
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    lock cmpxchgw %cx, (%rdi)
+; X64-NEXT:    # kill: def $ax killed $ax def $eax
+; X64-NEXT:    jne .LBB4_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+  %r = atomicrmw xor ptr %p, i16 32768 monotonic
+  ret i16 %r
+}
+
+define i8 @xor8_signbit_used(ptr %p) nounwind {
+; X86-LABEL: xor8_signbit_used:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl (%ecx), %eax
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB5_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    addb $-128, %dl
+; X86-NEXT:    lock cmpxchgb %dl, (%ecx)
+; X86-NEXT:    jne .LBB5_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor8_signbit_used:
+; X64:       # %bb.0:
+; X64-NEXT:    movzbl (%rdi), %eax
+; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:  .LBB5_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    leal -128(%rax), %ecx
+; X64-NEXT:    # kill: def $al killed $al killed $rax
+; X64-NEXT:    lock cmpxchgb %cl, (%rdi)
+; X64-NEXT:    # kill: def $al killed $al def $rax
+; X64-NEXT:    jne .LBB5_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    # kill: def $al killed $al killed $rax
+; X64-NEXT:    retq
+  %r = atomicrmw xor ptr %p, i8 128 monotonic
+  ret i8 %r
+}
+
+define i32 @xor32_not_signbit_used(ptr %p) nounwind {
+; X86-LABEL: xor32_not_signbit_used:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB6_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    xorl $-2147483647, %edx # imm = 0x80000001
+; X86-NEXT:    lock cmpxchgl %edx, (%ecx)
+; X86-NEXT:    jne .LBB6_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor32_not_signbit_used:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:  .LBB6_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    xorl $-2147483647, %ecx # imm = 0x80000001
+; X64-NEXT:    lock cmpxchgl %ecx, (%rdi)
+; X64-NEXT:    jne .LBB6_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+  %r = atomicrmw xor ptr %p, i32 2147483649 monotonic
+  ret i32 %r
+}
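
The CHECK lines above were produced by utils/update_llc_test_checks.py, as the
NOTE at the top of the file states. As a rough sketch of how such a file is
regenerated after a codegen change (the llc path shown is an assumed build
location, adjust for your setup):

  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/X86/atomic-xor.ll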
