[llvm] r358140 - [X86] Autogenerate complete checks. NFC

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Wed Apr 10 15:35:24 PDT 2019


Author: ctopper
Date: Wed Apr 10 15:35:24 2019
New Revision: 358140

URL: http://llvm.org/viewvc/llvm-project?rev=358140&view=rev
Log:
[X86] Autogenerate complete checks. NFC
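
The NOTE line added in the diff below records the tool used. For reference,
regenerating these assertions from an llvm/trunk checkout looks roughly like
the command below; the --llc-binary path is an assumption that depends on the
local build tree:

    python utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc \
        test/CodeGen/X86/atomic6432.ll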

Modified:
    llvm/trunk/test/CodeGen/X86/atomic6432.ll

Modified: llvm/trunk/test/CodeGen/X86/atomic6432.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic6432.ll?rev=358140&r1=358139&r2=358140&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic6432.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic6432.ll Wed Apr 10 15:35:24 2019
@@ -1,211 +1,905 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -O0 -mtriple=i686-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32
 
 @sc64 = external global i64
 
 define void @atomic_fetch_add64() nounwind {
-; X32-LABEL:   atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $72, %esp
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB0_1
+; X32-NEXT:  .LBB0_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    addl $1, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    adcl $0, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB0_1
+; X32-NEXT:    jmp .LBB0_2
+; X32-NEXT:  .LBB0_2: # %atomicrmw.end
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB0_3
+; X32-NEXT:  .LBB0_3: # %atomicrmw.start2
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    addl $3, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    adcl $0, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB0_3
+; X32-NEXT:    jmp .LBB0_4
+; X32-NEXT:  .LBB0_4: # %atomicrmw.end1
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB0_5
+; X32-NEXT:  .LBB0_5: # %atomicrmw.start8
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    addl $5, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    adcl $0, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB0_5
+; X32-NEXT:    jmp .LBB0_6
+; X32-NEXT:  .LBB0_6: # %atomicrmw.end7
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB0_7
+; X32-NEXT:  .LBB0_7: # %atomicrmw.start14
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    addl %esi, %edx
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    adcl %ebx, %edi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl (%esp), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB0_7
+; X32-NEXT:    jmp .LBB0_8
+; X32-NEXT:  .LBB0_8: # %atomicrmw.end13
+; X32-NEXT:    addl $72, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
 entry:
   %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
-; X32:       addl
-; X32:       adcl
-; X32:       lock
-; X32:       cmpxchg8b
   %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
-; X32:       addl
-; X32:       adcl
-; X32:       lock
-; X32:       cmpxchg8b
   %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
-; X32:       addl
-; X32:       adcl
-; X32:       lock
-; X32:       cmpxchg8b
   %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
-; X32:       addl
-; X32:       adcl
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_sub64() nounwind {
-; X32-LABEL:   atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $72, %esp
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB1_1
+; X32-NEXT:  .LBB1_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    addl $-1, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    adcl $-1, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB1_1
+; X32-NEXT:    jmp .LBB1_2
+; X32-NEXT:  .LBB1_2: # %atomicrmw.end
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB1_3
+; X32-NEXT:  .LBB1_3: # %atomicrmw.start2
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    addl $-3, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    adcl $-1, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB1_3
+; X32-NEXT:    jmp .LBB1_4
+; X32-NEXT:  .LBB1_4: # %atomicrmw.end1
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB1_5
+; X32-NEXT:  .LBB1_5: # %atomicrmw.start8
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    addl $-5, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    adcl $-1, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB1_5
+; X32-NEXT:    jmp .LBB1_6
+; X32-NEXT:  .LBB1_6: # %atomicrmw.end7
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB1_7
+; X32-NEXT:  .LBB1_7: # %atomicrmw.start14
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    subl %esi, %edx
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    sbbl %ebx, %edi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl (%esp), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB1_7
+; X32-NEXT:    jmp .LBB1_8
+; X32-NEXT:  .LBB1_8: # %atomicrmw.end13
+; X32-NEXT:    addl $72, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
-; X32:       addl $-1
-; X32:       adcl $-1
-; X32:       lock
-; X32:       cmpxchg8b
   %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
-; X32:       addl $-3
-; X32:       adcl $-1
-; X32:       lock
-; X32:       cmpxchg8b
   %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
-; X32:       addl $-5
-; X32:       adcl $-1
-; X32:       lock
-; X32:       cmpxchg8b
   %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
-; X32:       subl
-; X32:       sbbl
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_and64() nounwind {
-; X32-LABEL:   atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $56, %esp
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB2_1
+; X32-NEXT:  .LBB2_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    andl $3, %edx
+; X32-NEXT:    xorl %esi, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB2_1
+; X32-NEXT:    jmp .LBB2_2
+; X32-NEXT:  .LBB2_2: # %atomicrmw.end
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB2_3
+; X32-NEXT:  .LBB2_3: # %atomicrmw.start2
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    andl $1, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    andl $1, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB2_3
+; X32-NEXT:    jmp .LBB2_4
+; X32-NEXT:  .LBB2_4: # %atomicrmw.end1
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB2_5
+; X32-NEXT:  .LBB2_5: # %atomicrmw.start8
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    andl %esi, %edx
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    andl %ebx, %edi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl (%esp), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB2_5
+; X32-NEXT:    jmp .LBB2_6
+; X32-NEXT:  .LBB2_6: # %atomicrmw.end7
+; X32-NEXT:    addl $56, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
-; X32:       andl $3
-; X32-NOT:       andl
-; X32:       lock
-; X32:       cmpxchg8b
   %t2 = atomicrmw and  i64* @sc64, i64 4294967297 acquire
-; X32:       andl $1
-; X32:       andl $1
-; X32:       lock
-; X32:       cmpxchg8b
   %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
-; X32:       andl
-; X32:       andl
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_or64() nounwind {
-; X32-LABEL:   atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $56, %esp
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB3_1
+; X32-NEXT:  .LBB3_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    orl $3, %edx
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB3_1
+; X32-NEXT:    jmp .LBB3_2
+; X32-NEXT:  .LBB3_2: # %atomicrmw.end
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB3_3
+; X32-NEXT:  .LBB3_3: # %atomicrmw.start2
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    orl $1, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    orl $1, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB3_3
+; X32-NEXT:    jmp .LBB3_4
+; X32-NEXT:  .LBB3_4: # %atomicrmw.end1
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB3_5
+; X32-NEXT:  .LBB3_5: # %atomicrmw.start8
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    orl %esi, %edx
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    orl %ebx, %edi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl (%esp), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB3_5
+; X32-NEXT:    jmp .LBB3_6
+; X32-NEXT:  .LBB3_6: # %atomicrmw.end7
+; X32-NEXT:    addl $56, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
-; X32:       orl $3
-; X32-NOT:       orl
-; X32:       lock
-; X32:       cmpxchg8b
   %t2 = atomicrmw or   i64* @sc64, i64 4294967297 acquire
-; X32:       orl $1
-; X32:       orl $1
-; X32:       lock
-; X32:       cmpxchg8b
   %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
-; X32:       orl
-; X32:       orl
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_xor64() nounwind {
-; X32-LABEL:   atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $56, %esp
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB4_1
+; X32-NEXT:  .LBB4_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    xorl $3, %edx
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB4_1
+; X32-NEXT:    jmp .LBB4_2
+; X32-NEXT:  .LBB4_2: # %atomicrmw.end
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB4_3
+; X32-NEXT:  .LBB4_3: # %atomicrmw.start2
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    xorl $1, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    xorl $1, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %esi, %ecx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB4_3
+; X32-NEXT:    jmp .LBB4_4
+; X32-NEXT:  .LBB4_4: # %atomicrmw.end1
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %ecx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB4_5
+; X32-NEXT:  .LBB4_5: # %atomicrmw.start8
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    xorl %esi, %edx
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    xorl %ebx, %edi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl (%esp), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB4_5
+; X32-NEXT:    jmp .LBB4_6
+; X32-NEXT:  .LBB4_6: # %atomicrmw.end7
+; X32-NEXT:    addl $56, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
-; X32:       xorl
-; X32-NOT:       xorl
-; X32:       lock
-; X32:       cmpxchg8b
   %t2 = atomicrmw xor  i64* @sc64, i64 4294967297 acquire
-; X32:       xorl $1
-; X32:       xorl $1
-; X32:       lock
-; X32:       cmpxchg8b
   %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
-; X32:       xorl
-; X32:       xorl
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X32-LABEL:   atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $24, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl sc64+4, %edx
+; X32-NEXT:    movl sc64, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB5_1
+; X32-NEXT:  .LBB5_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %eax, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    andl %esi, %edx
+; X32-NEXT:    movl %ecx, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    andl %ebx, %edi
+; X32-NEXT:    notl %edi
+; X32-NEXT:    notl %edx
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl (%esp), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edi, %ebx
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB5_1
+; X32-NEXT:    jmp .LBB5_2
+; X32-NEXT:  .LBB5_2: # %atomicrmw.end
+; X32-NEXT:    addl $24, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
-; X32:       andl
-; X32:       andl
-; X32:       notl
-; X32:       notl
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_max64(i64 %x) nounwind {
-; X32-LABEL:   atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $28, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl sc64+4, %edx
+; X32-NEXT:    movl sc64, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB6_1
+; X32-NEXT:  .LBB6_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    subl %esi, %edx
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    sbbl %ebx, %edi
+; X32-NEXT:    cmovgel %eax, %ebx
+; X32-NEXT:    cmovgel %ecx, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %ebx, %ecx
+; X32-NEXT:    movl %esi, %ebx
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edi, (%esp) # 4-byte Spill
+; X32-NEXT:    jne .LBB6_1
+; X32-NEXT:    jmp .LBB6_2
+; X32-NEXT:  .LBB6_2: # %atomicrmw.end
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
-; X32:       subl
-; X32:       subl
-; X32:       cmov
-; X32:       cmov
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_min64(i64 %x) nounwind {
-; X32-LABEL:   atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $28, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl sc64+4, %edx
+; X32-NEXT:    movl sc64, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB7_1
+; X32-NEXT:  .LBB7_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT:    subl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    sbbl %eax, %esi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT:    cmovgel %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    cmovgel %ecx, %ebx
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT:    jne .LBB7_1
+; X32-NEXT:    jmp .LBB7_2
+; X32-NEXT:  .LBB7_2: # %atomicrmw.end
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
-; X32:       subl
-; X32:       subl
-; X32:       cmov
-; X32:       cmov
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_umax64(i64 %x) nounwind {
-; X32-LABEL:   atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $28, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl sc64+4, %edx
+; X32-NEXT:    movl sc64, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB8_1
+; X32-NEXT:  .LBB8_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT:    subl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    sbbl %eax, %esi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT:    cmovbl %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    cmovbl %ecx, %ebx
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT:    jne .LBB8_1
+; X32-NEXT:    jmp .LBB8_2
+; X32-NEXT:  .LBB8_2: # %atomicrmw.end
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X32:       subl
-; X32:       subl
-; X32:       cmov
-; X32:       cmov
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_umin64(i64 %x) nounwind {
-; X32-LABEL:   atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $28, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl sc64+4, %edx
+; X32-NEXT:    movl sc64, %esi
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB9_1
+; X32-NEXT:  .LBB9_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT:    subl %ecx, %edx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    sbbl %eax, %esi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT:    cmovael %eax, %edi
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    cmovael %ecx, %ebx
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %edx
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT:    jne .LBB9_1
+; X32-NEXT:    jmp .LBB9_2
+; X32-NEXT:  .LBB9_2: # %atomicrmw.end
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X32:       subl
-; X32:       subl
-; X32:       cmov
-; X32:       cmov
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_cmpxchg64() nounwind {
-; X32-LABEL:   atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:    movl $1, %ebx
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    addl $12, %esp
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_store64(i64 %x) nounwind {
-; X32-LABEL:   atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    subl $20, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %edx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB11_1
+; X32-NEXT:  .LBB11_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl (%esp), %edx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB11_1
+; X32-NEXT:    jmp .LBB11_2
+; X32-NEXT:  .LBB11_2: # %atomicrmw.end
+; X32-NEXT:    addl $20, %esp
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   store atomic i64 %x, i64* @sc64 release, align 8
-; X32:       lock
-; X32:       cmpxchg8b
   ret void
-; X32:       ret
 }
 
 define void @atomic_fetch_swap64(i64 %x) nounwind {
-; X32-LABEL:   atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    subl $20, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT:    movl sc64+4, %eax
+; X32-NEXT:    movl sc64, %edx
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jmp .LBB12_1
+; X32-NEXT:  .LBB12_1: # %atomicrmw.start
+; X32-NEXT:    # =>This Inner Loop Header: Depth=1
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl (%esp), %edx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT:    lock cmpxchg8b sc64
+; X32-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    jne .LBB12_1
+; X32-NEXT:    jmp .LBB12_2
+; X32-NEXT:  .LBB12_2: # %atomicrmw.end
+; X32-NEXT:    addl $20, %esp
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    retl
   %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
-; X32:       lock
-; X32:       xchg8b
   ret void
-; X32:       ret
 }
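
The shape that repeats in every function above is the generic expansion used
when the target has no native 64-bit atomic RMW instruction: on i686 the only
64-bit atomic compare-and-exchange instruction is cmpxchg8b, so each 64-bit
atomicrmw becomes a retry loop around it. A reduced, hypothetical example
(same flavor of IR as this test file, not part of the commit) would be:

    ; hypothetical reduced input, not part of atomic6432.ll
    define i64 @fetch_add(i64* %p, i64 %v) nounwind {
      %old = atomicrmw add i64* %p, i64 %v acquire
      ret i64 %old
    }

At -O0 with -mtriple=i686-- this lowers to the pattern checked above: an
addl/adcl pair computes the new 64-bit value in two halves, and a
lock cmpxchg8b loop retries until the exchange succeeds.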



